/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
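/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the basic lock/unlock pattern around shared data. The type and
 * helper names (struct my_dev, my_dev_bump) are hypothetical.
 *
 *	struct my_dev {
 *		struct mutex	lock;
 *		unsigned long	stats;
 *	};
 *
 *	static void my_dev_bump(struct my_dev *dev)
 *	{
 *		mutex_lock(&dev->lock);		-- may sleep, process context only
 *		dev->stats++;			-- critical section
 *		mutex_unlock(&dev->lock);	-- must be the same task
 *	}
 */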
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners trying to acquire the mutex
 * more or less simultaneously, the spinners need to acquire a MCS lock
 * first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))
static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;

	/*
	 * Wait until the lock holder passes the lock down.
	 * Using smp_load_acquire() provides a memory barrier that
	 * ensures subsequent operations happen after the lock is acquired.
	 */
	while (!(smp_load_acquire(&node->locked)))
		arch_mutex_cpu_relax();
}
static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	/*
	 * Pass the lock to the next waiter.
	 * smp_store_release() provides a memory barrier to ensure
	 * all operations in the critical section have been completed
	 * before the lock is released.
	 */
	smp_store_release(&next->locked, 1);
}
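/*
 * Editor's note: the pattern used by the optimistic-spinning code below is
 * simply a per-spinner queue node on the stack, so there is no allocation
 * and at most one CPU at a time spins directly on the mutex owner:
 *
 *	struct mspin_node node;
 *
 *	mspin_lock(MLOCK(lock), &node);
 *	... spin on lock->owner / try to grab lock->count ...
 *	mspin_unlock(MLOCK(lock), &node);
 */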
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might point
	 * to free()d memory; if it still matches, the rcu_read_lock() ensures
	 * the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}
#endif
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner here;
	 * the slow path is always taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner here;
	 * the slow path is always taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
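/*
 * Illustrative usage sketch (editor's addition): the wait/wound locking
 * pattern these helpers implement, loosely following
 * Documentation/ww-mutex-design.txt. The class and object names
 * (my_ww_class, obj_a, obj_b) are hypothetical, and a full implementation
 * would retry the whole sequence on a further -EDEADLK.
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ret = ww_mutex_lock(obj_a, &ctx);
 *	if (!ret) {
 *		ret = ww_mutex_lock(obj_b, &ctx);
 *		if (ret == -EDEADLK) {
 *			ww_mutex_unlock(obj_a);
 *			ww_mutex_lock_slow(obj_b, &ctx);
 *			ret = ww_mutex_lock(obj_a, &ctx);
 *		}
 *	}
 *	if (!ret)
 *		ww_acquire_done(&ctx);
 *	... use the objects, then ww_mutex_unlock() each one and call
 *	    ww_acquire_fini(&ctx) ...
 */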
static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
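/*
 * Editor's note on the stamp test above (added commentary, not from the
 * original source): assuming stamps are handed out from an incrementing
 * per-class counter in ww_acquire_init(), a smaller stamp means an older
 * (higher-priority) context. The unsigned subtraction
 * "ctx->stamp - hold_ctx->stamp <= LONG_MAX" is a wraparound-safe way of
 * asking "is ctx at least as young as hold_ctx?". For example, with
 * hold_ctx->stamp == 5: ctx->stamp == 7 gives 2 (<= LONG_MAX, ctx is
 * younger and must back off with -EDEADLK), while ctx->stamp == 3 wraps
 * to a huge value (> LONG_MAX, ctx is older and keeps waiting).
 */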
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}
/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb();

	/*
	 * Check if the lock is contended; if not, there is nobody to wake up.
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in the fastpath. Wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using the MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;
	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock is there a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				goto slowpath;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			goto slowpath;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (use_ww_ctx) {
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			goto slowpath;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	/* once more, can we acquire the lock? */
	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);
	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);
skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
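/*
 * Illustrative sketch (editor's addition): the _nested() variants exist so
 * lockdep can be told that two locks of the same class are legitimately
 * held at the same time. The structure and names below are hypothetical.
 *
 *	static void reparent(struct my_node *parent, struct my_node *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *		... move child under parent ...
 *		mutex_unlock(&child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 */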
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
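/*
 * Illustrative sketch (editor's addition): callers of
 * mutex_lock_interruptible() have to handle the -EINTR return, typically
 * by propagating it. my_ctx is hypothetical.
 *
 *	ret = mutex_lock_interruptible(&my_ctx->lock);
 *	if (ret)
 *		return ret;		-- interrupted by a signal, -EINTR
 *	... critical section ...
 *	mutex_unlock(&my_ctx->lock);
 *	return 0;
 */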
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					    struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
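/*
 * Illustrative sketch (editor's addition): because mutex_trylock() follows
 * the spin_trylock() convention, a return of 1 means "lock acquired". A
 * common pattern is to fall back to deferred work instead of sleeping;
 * my_dev and the helpers are hypothetical.
 *
 *	if (mutex_trylock(&my_dev->lock)) {
 *		do_quick_update(my_dev);
 *		mutex_unlock(&my_dev->lock);
 *	} else {
 *		defer_update(my_dev);
 *	}
 */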
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter which we are to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns true (with the mutex held) if we decremented to 0,
 * and false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
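/*
 * Illustrative sketch (editor's addition): the classic "drop a reference,
 * take the lock only for the final put" pattern this helper supports.
 * The object, list and helper names are hypothetical.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *			list_del(&obj->node);
 *			mutex_unlock(&obj_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */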