/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
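
/*
 * Usage sketch (illustrative only, not part of this file; the "foo" names
 * are hypothetical): a mutex is either defined statically or initialized
 * at runtime before first use.
 *
 *	static DEFINE_MUTEX(foo_lock);
 *
 * or, for a dynamically allocated object:
 *
 *	struct foo { struct mutex lock; };
 *
 *	mutex_init(&foo->lock);
 *
 * memset()-ing a mutex to zero, or copying it, is not a valid way to
 * initialize it.
 */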
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;
			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;
			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}
/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations
 * outwards except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	return atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr);
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr;
}
#endif
static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}
/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
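
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical): the
 * common pattern is a sleeping critical section in process context, always
 * unlocked by the task that locked it.
 *
 *	static DEFINE_MUTEX(foo_lock);
 *	static int foo_count;
 *
 *	void foo_inc(void)
 *	{
 *		mutex_lock(&foo_lock);
 *		foo_count++;
 *		mutex_unlock(&foo_lock);
 *	}
 */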
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/* Not quite done after calling ww_acquire_done()? */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/* After -EDEADLK you tried to acquire a different ww_mutex? Bad! */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/* Naughty, using a different class will lead to undefined behavior! */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return a->stamp - b->stamp <= LONG_MAX &&
	       (a->stamp != b->stamp || a > b);
}
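
/*
 * The unsigned subtraction above is the usual wraparound-safe ordering
 * test: a is considered "after" b when (a->stamp - b->stamp), computed
 * modulo ULONG_MAX + 1, lands in [0, LONG_MAX]; equal stamps fall back
 * to comparing the context pointers. Worked values (hypothetical, 64-bit
 * unsigned long):
 *
 *	a->stamp == 1, b->stamp == ULONG_MAX:
 *		a->stamp - b->stamp == 2 <= LONG_MAX	-> a after b (wrapped)
 *
 *	a->stamp == ULONG_MAX, b->stamp == 1:
 *		a->stamp - b->stamp == ULONG_MAX - 1 > LONG_MAX	-> a not after b
 */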
/*
 * Wake up any waiters that may have to back off when the lock is held by the
 * given context. Due to the invariants on the wait list, this can only
 * affect the first waiter with a context.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;
		if (cur->ww_ctx->acquired > 0 &&
		    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
		break;
	}
}
/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add
	 * themselves to the waiter list and sleep.
	 */
	smp_mb();

	/* Check if lock is contended; if not, there is nobody to wake up. */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}
/*
 * After acquiring lock in the slowpath set ctx.
 *
 * Unlike for the fast path, the caller ensures that waiters are woken
 * up where necessary. Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;
}
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined, only by acquiring
	 * wait_lock is there a guarantee that they are not invalid when
	 * reading. As such, when deadlock detection needs to be performed
	 * the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may be racing
	 * against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin if there
	 * are waiters. We want to avoid stealing the lock from a waiter
	 * with an earlier stamp, since the other thread may already own
	 * a lock that we also need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/* Similarly, stop spinning if we are no longer the first waiter. */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}
/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable. "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory; if it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/* Use vcpu_is_preempted to detect lock holder preemption. */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}
		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);
	/*
	 * To guard against lock holder preemption, skip spinning if the
	 * owner task is not running on a CPU or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}
/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
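
/*
 * Usage sketch for the ww_mutex API as a whole (illustrative only;
 * my_ww_class and the two-lock function below are hypothetical). On
 * -EDEADLK the caller backs off, sleeps on the contended lock with
 * ww_mutex_lock_slow(), and retries:
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	int lock_both(struct ww_mutex *a, struct ww_mutex *b,
 *		      struct ww_acquire_ctx *ctx)
 *	{
 *		struct ww_mutex *contended = NULL;
 *		int ret;
 *
 *		ww_acquire_init(ctx, &my_ww_class);
 *	retry:
 *		if (contended)
 *			ww_mutex_lock_slow(contended, ctx);
 *		if (contended != a) {
 *			ret = ww_mutex_lock(a, ctx);
 *			if (ret == -EDEADLK) {
 *				if (contended)
 *					ww_mutex_unlock(contended);
 *				contended = a;
 *				goto retry;
 *			}
 *		}
 *		if (contended != b) {
 *			ret = ww_mutex_lock(b, ctx);
 *			if (ret == -EDEADLK) {
 *				ww_mutex_unlock(a);
 *				contended = b;
 *				goto retry;
 *			}
 *		}
 *		ww_acquire_done(ctx);
 *		return 0;
 *	}
 *
 * Both locks are later released with ww_mutex_unlock(), followed by
 * ww_acquire_fini(ctx).
 */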
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
			    struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		goto deadlock;

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must back off.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (cur->ww_ctx)
			goto deadlock;
	}

	return 0;

deadlock:
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
	ctx->contending_lock = ww;
#endif
	return -EDEADLK;
}
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;

	if (!ww_ctx) {
		list_add_tail(&waiter->list, &lock->wait_list);
		return 0;
	}

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving them.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/* Back off immediately if necessary. */
			if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
				ww_ctx->contending_lock = ww;
#endif
				return -EDEADLK;
			}

			break;
		}

		pos = &cur->list;

		/* Wake up the waiter so that it gets a chance to back off. */
		if (cur->ww_ctx->acquired > 0) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	list_add_tail(&waiter->list, pos);
	return 0;
}
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_wakeup_for_backoff(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, current);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		list_add_tail(&waiter.list, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/* Add in stamp order, waking up waiters that must back off. */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_backoff;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its waiter
		 * list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_backoff:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
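
/*
 * Usage sketch (illustrative only; struct account is hypothetical):
 * subclasses tell lockdep that taking two locks of the same class is
 * intentional. Actual deadlock avoidance (e.g. ordering the two objects
 * by address) is still the caller's job:
 *
 *	void transfer(struct account *from, struct account *to)
 *	{
 *		mutex_lock(&from->lock);
 *		mutex_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
 *		...
 *		mutex_unlock(&to->lock);
 *		mutex_unlock(&from->lock);
 *	}
 */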
void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
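
/*
 * Usage sketch (illustrative only; "dev" is hypothetical): callers
 * typically translate the -EINTR into -ERESTARTSYS so the interrupted
 * syscall can be transparently restarted once the signal is handled:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 */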
/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
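
/*
 * Usage sketch (illustrative only; the cache names are hypothetical):
 * trylock suits opportunistic work that can simply be skipped when the
 * lock is busy. Note the 1/0 return convention:
 *
 *	if (mutex_trylock(&cache->lock)) {
 *		...
 *		mutex_unlock(&cache->lock);
 *	}
 */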
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
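
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical):
 * the classic use is dropping the last reference to an object that lives
 * on a mutex-protected list. The lock is only taken when the count can
 * actually hit zero:
 *
 *	void foo_put(struct foo *obj)
 *	{
 *		if (atomic_dec_and_mutex_lock(&obj->refs, &foo_list_lock)) {
 *			list_del(&obj->node);
 *			mutex_unlock(&foo_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */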