 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *
 * Also see Documentation/locking/mutex-design.txt.
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
atomic_long_set(&lock->owner, 0);
spin_lock_init(&lock->wait_lock);
INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
debug_mutex_init(lock, name, key);
EXPORT_SYMBOL(__mutex_init);
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04
#define MUTEX_FLAGS		0x07
static inline struct task_struct *__owner_task(unsigned long owner)
return (struct task_struct *)(owner & ~MUTEX_FLAGS);
static inline unsigned long __owner_flags(unsigned long owner)
return owner & MUTEX_FLAGS;
 * Trylock variant that returns the owning task on failure.
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
unsigned long owner, curr = (unsigned long)current;
owner = atomic_long_read(&lock->owner);
for (;;) { /* must loop, can race against a flag */
unsigned long old, flags = __owner_flags(owner);
unsigned long task = owner & ~MUTEX_FLAGS;
if (likely(task != curr))
break;
if (likely(!(flags & MUTEX_FLAG_PICKUP)))
break;
flags &= ~MUTEX_FLAG_PICKUP;
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
 * We set the HANDOFF bit; we must make sure it doesn't live
 * past the point where we acquire it. This would be possible
 * if we (accidentally) set the bit on an unlocked mutex.
flags &= ~MUTEX_FLAG_HANDOFF;
old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
return __owner_task(owner);
 * Actual trylock that will work on any unlocked state.
static inline bool __mutex_trylock(struct mutex *lock)
return !__mutex_trylock_or_owner(lock);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations
 * outwards except the performance penalty they incur.
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
unsigned long curr = (unsigned long)current;
if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
return true;
return false;
static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
unsigned long curr = (unsigned long)current;
if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
return true;
return false;
static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
atomic_long_or(flag, &lock->owner);
static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
atomic_long_andnot(flag, &lock->owner);
static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
unsigned long owner = atomic_long_read(&lock->owner);
unsigned long old, new;
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
new = (owner & MUTEX_FLAG_WAITERS);
new |= (unsigned long)task;
if (task)
new |= MUTEX_FLAG_PICKUP;
old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
static void __sched __mutex_lock_slowpath(struct mutex *lock);
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
void __sched mutex_lock(struct mutex *lock)
if (!__mutex_trylock_fast(lock))
__mutex_lock_slowpath(lock);
EXPORT_SYMBOL(mutex_lock);
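/*
 * Illustrative usage sketch (not part of this file; the names below are
 * made up): a caller serializing access to shared state with
 * mutex_lock()/mutex_unlock(), assuming <linux/mutex.h> is available.
 *
 *	static DEFINE_MUTEX(my_dev_lock);
 *	static int my_dev_count;
 *
 *	static void my_dev_frob(void)
 *	{
 *		mutex_lock(&my_dev_lock);	// may sleep; never call from IRQ context
 *		my_dev_count++;			// exclusive access while the mutex is held
 *		mutex_unlock(&my_dev_lock);	// must be released by the same task
 *	}
 */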
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
struct ww_acquire_ctx *ww_ctx)
#ifdef CONFIG_DEBUG_MUTEXES
 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
 * but released with a normal mutex_unlock in this call.
 * This should never happen; always use ww_mutex_unlock.
DEBUG_LOCKS_WARN_ON(ww->ctx);
 * Not quite done after calling ww_acquire_done()?
DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
if (ww_ctx->contending_lock) {
 * After -EDEADLK you tried to
 * acquire a different ww_mutex? Bad!
DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
 * You called ww_mutex_lock after receiving -EDEADLK,
 * but 'forgot' to unlock everything else first?
DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
ww_ctx->contending_lock = NULL;
 * Naughty, using a different class will lead to undefined behavior!
DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
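/*
 * Note: __ww_ctx_stamp_after(a, b) below returns true when @a's stamp was
 * issued after @b's, i.e. @a is the younger context. The unsigned
 * subtraction keeps the comparison valid across stamp wraparound; equal
 * stamps are tie-broken by comparing the context addresses.
 */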
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
return a->stamp - b->stamp <= LONG_MAX &&
       (a->stamp != b->stamp || a > b);
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contended slowpath, set ctx and wake up any waiters so they can recheck.
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
struct mutex_waiter *cur;
ww_mutex_lock_acquired(lock, ctx);
 * The lock->ctx update should be visible on all cores before
 * the atomic read is done, otherwise contended waiters might be
 * missed. The contended waiters will either see ww_ctx == NULL
 * and keep spinning, or they will acquire wait_lock, add themselves
 * to the waiter list and sleep.
 * Check if the lock is contended; if not, there is nobody to wake up.
if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
return;
 * Uh oh, we raced in the fastpath; wake up everyone in this case,
 * so they can see the new lock->ctx.
spin_lock_mutex(&lock->base.wait_lock, flags);
list_for_each_entry(cur, &lock->base.wait_list, list) {
debug_mutex_wake_waiter(&lock->base, cur);
wake_up_process(cur->task);
spin_unlock_mutex(&lock->base.wait_lock, flags);
 * After acquiring the lock in the slowpath, set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
struct mutex_waiter *cur;
ww_mutex_lock_acquired(lock, ctx);
 * Give any possible sleeping processes the chance to wake up,
 * so they can recheck if they have to back off.
list_for_each_entry(cur, &lock->base.wait_list, list) {
debug_mutex_wake_waiter(&lock->base, cur);
wake_up_process(cur->task);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
while (__mutex_owner(lock) == owner) {
 * Ensure we emit the owner->on_cpu dereference _after_
 * checking that lock->owner still matches owner. If that fails,
 * owner might point to freed memory. If it still matches,
 * the rcu_read_lock() ensures the memory stays valid.
 * Use vcpu_is_preempted() to detect lock holder preemption.
if (!owner->on_cpu || need_resched() ||
    vcpu_is_preempted(task_cpu(owner))) {
 * Initial check for entering the mutex spinning loop.
static inline int mutex_can_spin_on_owner(struct mutex *lock)
struct task_struct *owner;
owner = __mutex_owner(lock);
 * To cope with lock holder preemption, we skip spinning if the owner
 * task is not running on a CPU or its CPU is preempted.
if (owner)
retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
 * If lock->owner is not set, the mutex has been released. Return true
 * such that we'll trylock in the spin path, which is a faster option
 * than the blocking slow path.
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
static bool mutex_optimistic_spin(struct mutex *lock,
struct ww_acquire_ctx *ww_ctx,
const bool use_ww_ctx, const bool waiter)
 * The purpose of the mutex_can_spin_on_owner() function is
 * to eliminate the overhead of osq_lock() and osq_unlock()
 * in case spinning isn't possible. As a waiter-spinner
 * is not going to take the OSQ lock anyway, there is no need
 * to call mutex_can_spin_on_owner().
if (!mutex_can_spin_on_owner(lock))
 * In order to avoid a stampede of mutex spinners trying to
 * acquire the mutex all at once, the spinners need to take an
 * MCS (queued) lock first before spinning on the owner field.
if (!osq_lock(&lock->osq))
struct task_struct *owner;
if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
ww = container_of(lock, struct ww_mutex, base);
 * If ww->ctx is set, the contents are undefined; only by
 * acquiring wait_lock is there a guarantee that they are
 * not invalid when read.
 * As such, when deadlock detection needs to be
 * performed, optimistic spinning cannot be done.
if (READ_ONCE(ww->ctx))
/* Try to acquire the mutex... */
owner = __mutex_trylock_or_owner(lock);
 * There's an owner; wait for it to either
 * release the lock or go to sleep.
if (!mutex_spin_on_owner(lock, owner))
 * The cpu_relax() call is a compiler barrier which forces
 * everything in this loop to be re-loaded. We don't need
 * memory barriers as we'll eventually observe the right
 * values at the cost of a few extra spins.
osq_unlock(&lock->osq);
osq_unlock(&lock->osq);
 * If we fell out of the spin path because of need_resched(),
 * reschedule now, before we try-lock the mutex. This avoids getting
 * scheduled out right after we obtained the mutex.
if (need_resched()) {
 * We _should_ have TASK_RUNNING here, but just in case
 * we do not, make it so, otherwise we might get stuck.
__set_current_state(TASK_RUNNING);
schedule_preempt_disabled();
static bool mutex_optimistic_spin(struct mutex *lock,
struct ww_acquire_ctx *ww_ctx,
const bool use_ww_ctx, const bool waiter)
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
void __sched mutex_unlock(struct mutex *lock)
#ifndef CONFIG_DEBUG_LOCK_ALLOC
if (__mutex_unlock_fast(lock))
return;
__mutex_unlock_slowpath(lock, _RET_IP_);
EXPORT_SYMBOL(mutex_unlock);
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
void __sched ww_mutex_unlock(struct ww_mutex *lock)
 * The unlocking fastpath is the 0->1 transition from 'locked'
 * into 'unlocked' state:
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
if (lock->ctx->acquired > 0)
lock->ctx->acquired--;
mutex_unlock(&lock->base);
EXPORT_SYMBOL(ww_mutex_unlock);
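/*
 * Illustrative usage sketch (not part of this file; 'my_ww_class' and the
 * helper below are made up): taking two ww_mutexes under one acquire
 * context and backing off on -EDEADLK. See
 * Documentation/locking/ww-mutex-design.txt for the full pattern; a real
 * caller would loop until no lock attempt returns -EDEADLK.
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	static void lock_pair(struct ww_mutex *m1, struct ww_mutex *m2)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &my_ww_class);
 *
 *		ww_mutex_lock(m1, &ctx);
 *		if (ww_mutex_lock(m2, &ctx) == -EDEADLK) {
 *			// An older context owns m2: drop what we hold,
 *			// sleep until m2 is free, then retake m1.
 *			ww_mutex_unlock(m1);
 *			ww_mutex_lock_slow(m2, &ctx);
 *			ww_mutex_lock(m1, &ctx);
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		// ... both objects locked; do the work ...
 *
 *		ww_mutex_unlock(m1);
 *		ww_mutex_unlock(m2);
 *		ww_acquire_fini(&ctx);
 *	}
 */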
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
if (__ww_ctx_stamp_after(ctx, hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
ctx->contending_lock = ww;
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
struct mutex *lock,
struct ww_acquire_ctx *ww_ctx)
struct mutex_waiter *cur;
struct list_head *pos;
list_add_tail(&waiter->list, &lock->wait_list);
 * Add the waiter before the first waiter with a higher stamp.
 * Waiters without a context are skipped to avoid starving them.
pos = &lock->wait_list;
list_for_each_entry_reverse(cur, &lock->wait_list, list) {
if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
/* Back off immediately if necessary. */
if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
ww = container_of(lock, struct ww_mutex, base);
DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
ww_ctx->contending_lock = ww;
list_add_tail(&waiter->list, pos);
 * Lock a mutex (possibly interruptible), slowpath:
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip,
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
struct mutex_waiter waiter;
ww = container_of(lock, struct ww_mutex, base);
if (use_ww_ctx && ww_ctx) {
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
return -EALREADY;
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
if (__mutex_trylock(lock) ||
    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
/* got the lock, yay! */
lock_acquired(&lock->dep_map, ip);
if (use_ww_ctx && ww_ctx)
ww_mutex_set_context_fastpath(ww, ww_ctx);
spin_lock_mutex(&lock->wait_lock, flags);
 * After waiting to acquire the wait_lock, try again.
if (__mutex_trylock(lock))
debug_mutex_lock_common(lock, &waiter);
debug_mutex_add_waiter(lock, &waiter, current);
lock_contended(&lock->dep_map, ip);
/* add waiting tasks to the end of the waitqueue (FIFO): */
list_add_tail(&waiter.list, &lock->wait_list);
/* Add in stamp order, waking up waiters that must back off. */
ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
if (ret)
goto err_early_backoff;
waiter.ww_ctx = ww_ctx;
waiter.task = current;
if (__mutex_waiter_is_first(lock, &waiter))
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
set_current_state(state);
 * Once we hold wait_lock, we're serialized against
 * mutex_unlock() handing the lock off to us; do a trylock
 * before testing the error conditions to make sure we pick up
 * the handoff.
if (__mutex_trylock(lock))
 * Check for signals and wound conditions while holding
 * wait_lock. This ensures the lock cancellation is ordered
 * against mutex_unlock() and wake-ups do not go missing.
if (unlikely(signal_pending_state(state, current))) {
if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
spin_unlock_mutex(&lock->wait_lock, flags);
schedule_preempt_disabled();
 * ww_mutex needs to always recheck its position since its waiter
 * list is not FIFO ordered.
if ((use_ww_ctx && ww_ctx) || !first) {
first = __mutex_waiter_is_first(lock, &waiter);
if (first)
__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
set_current_state(state);
 * Here we order against unlock; we must either see it change
 * state back to RUNNING and fall through the next schedule(),
 * or we must see its unlock and acquire.
if (__mutex_trylock(lock) ||
    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)))
break;
spin_lock_mutex(&lock->wait_lock, flags);
spin_lock_mutex(&lock->wait_lock, flags);
__set_current_state(TASK_RUNNING);
mutex_remove_waiter(lock, &waiter, current);
if (likely(list_empty(&lock->wait_list)))
__mutex_clear_flag(lock, MUTEX_FLAGS);
debug_mutex_free_waiter(&waiter);
/* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
if (use_ww_ctx && ww_ctx)
ww_mutex_set_context_slowpath(ww, ww_ctx);
spin_unlock_mutex(&lock->wait_lock, flags);
__set_current_state(TASK_RUNNING);
mutex_remove_waiter(lock, &waiter, current);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
mutex_release(&lock->dep_map, 1, ip);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
subclass, NULL, _RET_IP_, NULL, 0);
EXPORT_SYMBOL_GPL(mutex_lock_nested);
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
0, nest, _RET_IP_, NULL, 0);
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
return __mutex_lock_common(lock, TASK_KILLABLE,
subclass, NULL, _RET_IP_, NULL, 0);
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
subclass, NULL, _RET_IP_, NULL, 0);
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
if (ctx->deadlock_inject_countdown-- == 0) {
tmp = ctx->deadlock_inject_interval;
if (tmp > UINT_MAX/4)
tmp = UINT_MAX;
else
tmp = tmp*2 + tmp + tmp/2;
ctx->deadlock_inject_interval = tmp;
ctx->deadlock_inject_countdown = tmp;
ctx->contending_lock = lock;
ww_mutex_unlock(lock);
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
if (!ret && ctx && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
EXPORT_SYMBOL_GPL(ww_mutex_lock);
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
if (!ret && ctx && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
 * Release the lock, slowpath:
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
struct task_struct *next = NULL;
unsigned long owner, flags;
DEFINE_WAKE_Q(wake_q);
mutex_release(&lock->dep_map, 1, ip);
 * Release the lock before (potentially) taking the spinlock such that
 * other contenders can get on with things ASAP.
 *
 * Except when HANDOFF, in which case we must not clear the owner field,
 * but instead set it to the top waiter.
owner = atomic_long_read(&lock->owner);
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
if (owner & MUTEX_FLAG_HANDOFF)
break;
old = atomic_long_cmpxchg_release(&lock->owner, owner,
__owner_flags(owner));
if (owner & MUTEX_FLAG_WAITERS)
break;
spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_unlock(lock);
if (!list_empty(&lock->wait_list)) {
/* get the first entry from the wait-list: */
struct mutex_waiter *waiter =
list_first_entry(&lock->wait_list,
struct mutex_waiter, list);
next = waiter->task;
debug_mutex_wake_waiter(lock, waiter);
wake_q_add(&wake_q, next);
if (owner & MUTEX_FLAG_HANDOFF)
__mutex_handoff(lock, next);
spin_unlock_mutex(&lock->wait_lock, flags);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
int __sched mutex_lock_interruptible(struct mutex *lock)
if (__mutex_trylock_fast(lock))
return 0;
return __mutex_lock_interruptible_slowpath(lock);
EXPORT_SYMBOL(mutex_lock_interruptible);
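/*
 * Illustrative usage sketch (not part of this file; reuses the made-up
 * my_dev_lock/my_dev_count names from the mutex_lock() sketch above):
 * a syscall-path helper that propagates -EINTR when a signal arrives
 * while sleeping on the mutex.
 *
 *	static int my_dev_frob_interruptible(void)
 *	{
 *		int ret = mutex_lock_interruptible(&my_dev_lock);
 *
 *		if (ret)
 *			return ret;	// -EINTR: interrupted before acquiring the lock
 *		my_dev_count++;
 *		mutex_unlock(&my_dev_lock);
 *		return 0;
 *	}
 */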
int __sched mutex_lock_killable(struct mutex *lock)
if (__mutex_trylock_fast(lock))
return 0;
return __mutex_lock_killable_slowpath(lock);
EXPORT_SYMBOL(mutex_lock_killable);
static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
NULL, _RET_IP_, NULL, 0);
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
return __mutex_lock_common(lock, TASK_KILLABLE, 0,
NULL, _RET_IP_, NULL, 0);
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
NULL, _RET_IP_, NULL, 0);
static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
NULL, _RET_IP_, ctx, 1);
static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
NULL, _RET_IP_, ctx, 1);
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
int __sched mutex_trylock(struct mutex *lock)
bool locked = __mutex_trylock(lock);
if (locked)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return locked;
EXPORT_SYMBOL(mutex_trylock);
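/*
 * Illustrative usage sketch (not part of this file; names are made up):
 * opportunistic work that is simply skipped when the mutex is contended.
 *
 *	static DEFINE_MUTEX(my_cache_lock);
 *
 *	static void my_cache_trim_if_idle(void)
 *	{
 *		if (!mutex_trylock(&my_cache_lock))
 *			return;			// contended: somebody else is working here
 *		// ... trim the cache ...
 *		mutex_unlock(&my_cache_lock);
 *	}
 */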
#ifndef CONFIG_DEBUG_LOCK_ALLOC
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
if (__mutex_trylock_fast(&lock->base)) {
ww_mutex_set_context_fastpath(lock, ctx);
return 0;
return __ww_mutex_lock_slowpath(lock, ctx);
EXPORT_SYMBOL(ww_mutex_lock);
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
if (__mutex_trylock_fast(&lock->base)) {
ww_mutex_set_context_fastpath(lock, ctx);
return 0;
return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we dec to 0; return false otherwise.
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
/* dec if we can't possibly hit 0 */
if (atomic_add_unless(cnt, -1, 1))
return 0;
/* we might hit 0, so take the lock */
mutex_lock(lock);
if (!atomic_dec_and_test(cnt)) {
/* when we actually did the dec, we didn't hit 0 */
mutex_unlock(lock);
return 0;
}
/* we hit 0, and we hold the lock */
return 1;
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
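/*
 * Illustrative usage sketch (not part of this file; 'struct my_obj',
 * 'my_obj_list_lock' and my_obj_put() are made up): the classic pattern of
 * dropping a reference and tearing down under the lock only when it was
 * the last one.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &my_obj_list_lock))
 *			return;				// not the last reference
 *		list_del(&obj->node);			// unlink while holding the lock
 *		mutex_unlock(&my_obj_list_lock);
 *		kfree(obj);
 *	}
 */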