/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
 * bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02

#define MUTEX_FLAGS		0x03

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
/*
 * Actual trylock that will work on any unlocked state.
 *
 * When setting the owner field, we must preserve the low flag bits.
 *
 * Be careful with @handoff, only set that in a wait-loop (where you set
 * HANDOFF) to avoid recursive lock attempts.
 */
static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);

		if (__owner_task(owner)) {
			if (handoff && unlikely(__owner_task(owner) == current)) {
				/*
				 * Provide ACQUIRE semantics for the lock-handoff.
				 *
				 * We cannot easily use load-acquire here, since
				 * the actual load is a failed cmpxchg, which
				 * doesn't imply any barriers.
				 *
				 * Also, this is a fairly unlikely scenario, and
				 * this contains the cost.
				 */
				smp_mb(); /* ACQUIRE */
				return true;
			}
			return false;
		}

		/*
		 * We set the HANDOFF bit, so we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return true;

		owner = old;
	}
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif
static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}
/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
 * semantics like a regular unlock, the __mutex_trylock() provides matching
 * ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
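/*
 * Illustrative usage sketch (not part of the original file): a plain mutex
 * protecting a list. The 'example_*' names below are made up for the
 * example; the pattern is the usual lock/touch-shared-data/unlock sequence
 * the comment above describes.
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static LIST_HEAD(example_list);
 *
 *	static void example_add(struct list_head *entry)
 *	{
 *		mutex_lock(&example_lock);
 *		list_add_tail(entry, &example_list);
 *		mutex_unlock(&example_lock);
 *	}
 */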
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}
/*
 * After acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb();

	/*
	 * Check if lock is contended, if not there is nobody to wake up.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}
/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	return ret;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}
/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	/*
	 * In order to avoid a stampede of mutex spinners trying to
	 * acquire the mutex all at once, the spinners need to take a
	 * MCS (queued) lock first before spinning on the owner field.
	 */
	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = __mutex_owner(lock);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (__mutex_trylock(lock, false)) {
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
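/*
 * Illustrative usage sketch (not part of the original file): taking two
 * ww_mutexes under one acquire context and backing off on -EDEADLK. The
 * 'example_*' names are made up; the documented API also offers
 * ww_mutex_lock_slow() for relocking the contended mutex, see
 * Documentation/locking/ww-mutex-design.txt.
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	static void example_lock_two(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_mutex *first = a, *second = b;
 *		struct ww_acquire_ctx ctx;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &example_ww_class);
 *	retry:
 *		ww_mutex_lock(first, &ctx);	(cannot deadlock, nothing held)
 *		ret = ww_mutex_lock(second, &ctx);
 *		if (ret == -EDEADLK) {
 *			ww_mutex_unlock(first);
 *			swap(first, second);	(take the contended one first)
 *			goto retry;
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		... both objects are now locked ...
 *
 *		ww_mutex_unlock(a);
 *		ww_mutex_unlock(b);
 *		ww_acquire_fini(&ctx);
 *	}
 */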
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
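/*
 * Worked example of the stamp check above (illustrative): stamps are handed
 * out in increasing order by ww_acquire_init(), so a smaller stamp means an
 * older context. If the holder's stamp is 3 and ours is 5, then 5 - 3 fits
 * below LONG_MAX and the stamps differ, so we are the younger context and
 * back off with -EDEADLK. Had our stamp been 2, the unsigned subtraction
 * 2 - 3 would wrap far above LONG_MAX, the check would fail, and we would
 * keep waiting, letting the older context win.
 */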
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	if (use_ww_ctx) {
		ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock, false) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock, false))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	lock_contended(&lock->dep_map, ip);

	set_task_state(task, state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock, first))
			goto acquired;

		/*
		 * Check for signals and wound conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_task_state(task, state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock, first))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	if (use_ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	__set_task_state(task, TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2; /* grow the interval by 3.5x */

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	unsigned long owner, flags;
	WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock_mutex(&lock->wait_lock, flags);

	wake_up_q(&wake_q);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
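/*
 * Illustrative usage sketch (not part of the original file): a syscall-path
 * helper that gives up when the caller is signalled instead of sleeping
 * uninterruptibly. 'struct example_dev' and the function name are made up.
 *
 *	static int example_dev_write(struct example_dev *dev, const void *buf, size_t len)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *
 *		... copy data into the device under dev->lock ...
 *
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */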
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}
#endif
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock, false);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
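/*
 * Illustrative usage sketch (not part of the original file): opportunistic
 * work that is simply skipped when the lock is contended, relying on the
 * 1-on-success / 0-on-contention convention documented above. Names are
 * made up for the example.
 *
 *	static void example_try_flush(struct example_cache *cache)
 *	{
 *		if (!mutex_trylock(&cache->lock))
 *			return;
 *
 *		... flush the cache under cache->lock ...
 *
 *		mutex_unlock(&cache->lock);
 *	}
 */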
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
#endif
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
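/*
 * Illustrative usage sketch (not part of the original file): dropping a
 * reference where the final put must remove the object from a registry
 * under a mutex. The 'example_*' names are made up; on a non-zero return
 * the lock is held and the caller must unlock it.
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &example_registry_lock))
 *			return;
 *
 *		list_del(&obj->node);
 *		mutex_unlock(&example_registry_lock);
 *		kfree(obj);
 *	}
 */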