diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 51e2ba557e2a0445db36cac901d42c4727dcdacf..ad2d9e22697b92125a643efd049bd2d3e7a54352 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -241,8 +241,8 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
-                                                  struct ww_acquire_ctx *ww_ctx)
+static __always_inline void
+ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 {
 #ifdef CONFIG_DEBUG_MUTEXES
        /*
@@ -323,11 +323,8 @@ __ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
  * slowpath, set ctx and wake up any waiters so they can recheck.
  */
 static __always_inline void
-ww_mutex_set_context_fastpath(struct ww_mutex *lock,
-                              struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       unsigned long flags;
-
        ww_mutex_lock_acquired(lock, ctx);
 
        lock->ctx = ctx;
@@ -351,9 +348,9 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
         * Uh oh, we raced in fastpath, wake up everyone in this case,
         * so they can see the new lock->ctx.
         */
-       spin_lock_mutex(&lock->base.wait_lock, flags);
+       spin_lock(&lock->base.wait_lock);
        __ww_mutex_wakeup_for_backoff(&lock->base, ctx);
-       spin_unlock_mutex(&lock->base.wait_lock, flags);
+       spin_unlock(&lock->base.wait_lock);
 }
 
 /*
@@ -365,20 +362,65 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
  * Callers must hold the mutex wait_lock.
  */
 static __always_inline void
-ww_mutex_set_context_slowpath(struct ww_mutex *lock,
-                             struct ww_acquire_ctx *ctx)
+ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        ww_mutex_lock_acquired(lock, ctx);
        lock->ctx = ctx;
 }
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+
+static inline
+bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                           struct mutex_waiter *waiter)
+{
+       struct ww_mutex *ww;
+
+       ww = container_of(lock, struct ww_mutex, base);
+
+       /*
+        * If ww->ctx is set the contents are undefined, only
+        * by acquiring wait_lock there is a guarantee that
+        * they are not invalid when reading.
+        *
+        * As such, when deadlock detection needs to be
+        * performed the optimistic spinning cannot be done.
+        *
+        * Check this in every inner iteration because we may
+        * be racing against another thread's ww_mutex_lock.
+        */
+       if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
+               return false;
+
+       /*
+        * If we aren't on the wait list yet, cancel the spin
+        * if there are waiters. We want  to avoid stealing the
+        * lock from a waiter with an earlier stamp, since the
+        * other thread may already own a lock that we also
+        * need.
+        */
+       if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
+               return false;
+
+       /*
+        * Similarly, stop spinning if we are no longer the
+        * first waiter.
+        */
+       if (waiter && !__mutex_waiter_is_first(lock, waiter))
+               return false;
+
+       return true;
+}
+
 /*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
+ * Look out! "owner" is an entirely speculative pointer access and not
+ * reliable.
+ *
+ * "noinline" so that this function shows up on perf profiles.
  */
 static noinline
-bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
+                        struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 {
        bool ret = true;
 
@@ -401,6 +443,11 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
                        break;
                }
 
+               if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
+                       ret = false;
+                       break;
+               }
+
                cpu_relax();
        }
        rcu_read_unlock();
@@ -459,9 +506,9 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  * with the spinner at the head of the OSQ, if present, until the owner is
  * changed to itself.
  */
-static bool mutex_optimistic_spin(struct mutex *lock,
-                                 struct ww_acquire_ctx *ww_ctx,
-                                 const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                     const bool use_ww_ctx, struct mutex_waiter *waiter)
 {
        if (!waiter) {
                /*
@@ -486,22 +533,6 @@ static bool mutex_optimistic_spin(struct mutex *lock,
        for (;;) {
                struct task_struct *owner;
 
-               if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
-                       struct ww_mutex *ww;
-
-                       ww = container_of(lock, struct ww_mutex, base);
-                       /*
-                        * If ww->ctx is set the contents are undefined, only
-                        * by acquiring wait_lock there is a guarantee that
-                        * they are not invalid when reading.
-                        *
-                        * As such, when deadlock detection needs to be
-                        * performed the optimistic spinning cannot be done.
-                        */
-                       if (READ_ONCE(ww->ctx))
-                               goto fail_unlock;
-               }
-
                /* Try to acquire the mutex... */
                owner = __mutex_trylock_or_owner(lock);
                if (!owner)
@@ -511,7 +542,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                 * There's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
-               if (!mutex_spin_on_owner(lock, owner))
+               if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
                        goto fail_unlock;
 
                /*
@@ -551,9 +582,9 @@ static bool mutex_optimistic_spin(struct mutex *lock,
        return false;
 }
 #else
-static bool mutex_optimistic_spin(struct mutex *lock,
-                                 struct ww_acquire_ctx *ww_ctx,
-                                 const bool use_ww_ctx, const bool waiter)
+static __always_inline bool
+mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                     const bool use_ww_ctx, struct mutex_waiter *waiter)
 {
        return false;
 }
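
For readers less familiar with the optimistic-spin path above: the point is to busy-wait for a short while as long as the current owner is still running on another CPU, instead of sleeping immediately, because a running owner is likely to release the lock soon. A very rough user-space analogue (plain pthreads, not taken from this file, and without the owner-on-CPU check or the OSQ queueing) is a bounded trylock spin that falls back to a blocking lock:

#include <pthread.h>

/*
 * Rough analogue only: spin on trylock a bounded number of times, then
 * block. The kernel version is much more careful: it spins only while
 * the owner task is actually on a CPU, and it queues spinners on an OSQ
 * so that only the spinner at the head hammers the lock word.
 */
static void lock_with_bounded_spin(pthread_mutex_t *m)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (pthread_mutex_trylock(m) == 0)
			return;		/* got the lock without sleeping */
	}
	pthread_mutex_lock(m);		/* give up spinning and block */
}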
@@ -707,13 +738,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
        struct mutex_waiter waiter;
-       unsigned long flags;
        bool first = false;
        struct ww_mutex *ww;
        int ret;
 
-       ww = container_of(lock, struct ww_mutex, base);
+       might_sleep();
 
+       ww = container_of(lock, struct ww_mutex, base);
        if (use_ww_ctx && ww_ctx) {
                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                        return -EALREADY;
@@ -723,7 +754,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
        if (__mutex_trylock(lock) ||
-           mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
+           mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
                /* got the lock, yay! */
                lock_acquired(&lock->dep_map, ip);
                if (use_ww_ctx && ww_ctx)
@@ -732,7 +763,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                return 0;
        }
 
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
@@ -751,6 +782,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        if (!use_ww_ctx) {
                /* add waiting tasks to the end of the waitqueue (FIFO): */
                list_add_tail(&waiter.list, &lock->wait_list);
+
+#ifdef CONFIG_DEBUG_MUTEXES
+               waiter.ww_ctx = MUTEX_POISON_WW_CTX;
+#endif
        } else {
                /* Add in stamp order, waking up waiters that must back off. */
                ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
@@ -792,7 +827,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                                goto err;
                }
 
-               spin_unlock_mutex(&lock->wait_lock, flags);
+               spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();
 
                /*
@@ -812,12 +847,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * or we must see its unlock and acquire.
                 */
                if (__mutex_trylock(lock) ||
-                   (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)))
+                   (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
                        break;
 
-               spin_lock_mutex(&lock->wait_lock, flags);
+               spin_lock(&lock->wait_lock);
        }
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
 acquired:
        __set_current_state(TASK_RUNNING);
 
@@ -834,7 +869,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        if (use_ww_ctx && ww_ctx)
                ww_mutex_set_context_slowpath(ww, ww_ctx);
 
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;
 
@@ -842,20 +877,33 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        __set_current_state(TASK_RUNNING);
        mutex_remove_waiter(lock, &waiter, current);
 err_early_backoff:
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
        return ret;
 }
 
+static int __sched
+__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+            struct lockdep_map *nest_lock, unsigned long ip)
+{
+       return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
+}
+
+static int __sched
+__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
+               struct lockdep_map *nest_lock, unsigned long ip,
+               struct ww_acquire_ctx *ww_ctx)
+{
+       return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
-       might_sleep();
-       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                           subclass, NULL, _RET_IP_, NULL, 0);
+       __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
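
With might_sleep() moved into __mutex_lock_common() and the new __mutex_lock() wrapper, the lockdep-annotated entry points above become one-liners. As a reminder of what the subclass argument they pass is for, here is a minimal sketch of the usual pattern for locking two objects of the same lock class (struct node and lock_node_pair() are invented for illustration):

#include <linux/kernel.h>
#include <linux/mutex.h>

/* Hypothetical object type, for illustration only. */
struct node {
	struct mutex lock;
	/* ... */
};

/*
 * Both mutexes belong to the same lock class, so lockdep would normally
 * report the second acquisition as a possible recursive deadlock.
 * Ordering by address keeps the locking order stable across callers,
 * and SINGLE_DEPTH_NESTING tells lockdep the nesting is intentional.
 */
static void lock_node_pair(struct node *n1, struct node *n2)
{
	if (n2 < n1)
		swap(n1, n2);

	mutex_lock(&n1->lock);
	mutex_lock_nested(&n2->lock, SINGLE_DEPTH_NESTING);
}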
@@ -863,30 +911,38 @@ EXPORT_SYMBOL_GPL(mutex_lock_nested);
 void __sched
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
-       might_sleep();
-       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                           0, nest, _RET_IP_, NULL, 0);
+       __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
 int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
-       might_sleep();
-       return __mutex_lock_common(lock, TASK_KILLABLE,
-                                  subclass, NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
-       might_sleep();
-       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-                                  subclass, NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
+void __sched
+mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
+{
+       int token;
+
+       might_sleep();
+
+       token = io_schedule_prepare();
+       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+                           subclass, NULL, _RET_IP_, NULL, 0);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
+
 static inline int
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -919,9 +975,9 @@ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
        int ret;
 
        might_sleep();
-       ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-                                  0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-                                  ctx, 1);
+       ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
+                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+                              ctx);
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -935,9 +991,9 @@ ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
        int ret;
 
        might_sleep();
-       ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-                                 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
-                                 ctx, 1);
+       ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
+                             0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+                             ctx);
 
        if (!ret && ctx && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);
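
Both ww_mutex_lock() variants now route through __ww_mutex_lock() instead of passing a bare use_ww_ctx flag to __mutex_lock_common(). The caller-visible contract is unchanged: -EALREADY when this acquire context already holds the lock, -EDEADLK when the caller must back off. A minimal two-object sketch of that backoff dance, following the pattern in Documentation/locking/ww-mutex-design.txt (demo_ww_class, struct buf and lock_buf_pair() are invented here; the ww_mutexes are assumed to have been set up with ww_mutex_init()):

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

struct buf {
	struct ww_mutex lock;
	/* ... */
};

static int lock_buf_pair(struct buf *a, struct buf *b, struct ww_acquire_ctx *ctx)
{
	struct buf *contended = NULL;
	int ret;

	ww_acquire_init(ctx, &demo_ww_class);
retry:
	if (contended) {
		/* We backed off; sleep on the lock we lost, then keep it. */
		ww_mutex_lock_slow(&contended->lock, ctx);
		if (contended == b)
			swap(a, b);	/* 'a' is always the lock we hold */
	} else {
		ret = ww_mutex_lock(&a->lock, ctx);
		if (ret)
			goto err_fini;
	}

	ret = ww_mutex_lock(&b->lock, ctx);
	if (ret == -EDEADLK) {
		ww_mutex_unlock(&a->lock);
		contended = b;
		goto retry;
	}
	if (ret) {
		ww_mutex_unlock(&a->lock);
		goto err_fini;
	}

	ww_acquire_done(ctx);	/* all locks held, no more backing off */
	return 0;

err_fini:
	ww_acquire_fini(ctx);
	return ret;
}

On success the caller runs its critical section, unlocks both ww_mutexes, and finally calls ww_acquire_fini(ctx).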
@@ -954,8 +1010,8 @@ EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
 {
        struct task_struct *next = NULL;
-       unsigned long owner, flags;
        DEFINE_WAKE_Q(wake_q);
+       unsigned long owner;
 
        mutex_release(&lock->dep_map, 1, ip);
 
@@ -990,7 +1046,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
                owner = old;
        }
 
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
@@ -1007,7 +1063,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);
 
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
 
        wake_up_q(&wake_q);
 }
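
The unlock slowpath above only queues the task to wake while holding wait_lock and performs the actual wakeup with wake_up_q() after dropping it, so the woken task does not immediately collide with a still-held spinlock. The same deferred-wakeup pattern looks like this in isolation (my_lock, my_waiters and wake_first_waiter() are invented; the wake_q helpers live in <linux/sched/wake_q.h> in current kernels):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

struct my_waiter {
	struct list_head list;
	struct task_struct *task;
};

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_waiters);

static void wake_first_waiter(void)
{
	DEFINE_WAKE_Q(wake_q);
	struct my_waiter *w;

	spin_lock(&my_lock);
	w = list_first_entry_or_null(&my_waiters, struct my_waiter, list);
	if (w)
		wake_q_add(&wake_q, w->task);	/* only queued, not woken yet */
	spin_unlock(&my_lock);

	wake_up_q(&wake_q);	/* actual wakeup, after the lock is dropped */
}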
@@ -1057,40 +1113,47 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
+void __sched mutex_lock_io(struct mutex *lock)
+{
+       int token;
+
+       token = io_schedule_prepare();
+       mutex_lock(lock);
+       io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io);
+
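
mutex_lock_io() above, like mutex_lock_io_nested() earlier in this diff, simply brackets the acquisition with io_schedule_prepare()/io_schedule_finish(), so any sleep while waiting for the mutex is accounted as iowait. A minimal sketch of a caller (shared_io_lock and do_serialized_io() are invented for illustration):

#include <linux/mutex.h>

static DEFINE_MUTEX(shared_io_lock);	/* hypothetical lock */

static void do_serialized_io(void)
{
	/*
	 * If we sleep here the time is charged as iowait, which keeps
	 * iowait accounting meaningful when this mutex is routinely held
	 * across real I/O.
	 */
	mutex_lock_io(&shared_io_lock);
	/* ... submit and wait for I/O while serialized ... */
	mutex_unlock(&shared_io_lock);
}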
 static noinline void __sched
 __mutex_lock_slowpath(struct mutex *lock)
 {
-       __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-                           NULL, _RET_IP_, NULL, 0);
+       __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
-       return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-                                  NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
-       return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, NULL, 0);
+       return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, ctx, 1);
+       return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
+                              _RET_IP_, ctx);
 }
 
 static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                            struct ww_acquire_ctx *ctx)
 {
-       return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-                                  NULL, _RET_IP_, ctx, 1);
+       return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
+                              _RET_IP_, ctx);
 }
 
 #endif