locking/mutex: Fix lockdep_assert_held() fail
author    Peter Zijlstra <peterz@infradead.org>
          Tue, 17 Jan 2017 15:06:09 +0000 (16:06 +0100)
committer Ingo Molnar <mingo@kernel.org>
          Mon, 30 Jan 2017 10:42:59 +0000 (11:42 +0100)
In commit:

  659cf9f5824a ("locking/ww_mutex: Optimize ww-mutexes by waking at most one waiter for backoff when acquiring the lock")

I replaced a comment with a lockdep_assert_held(). However, it turns
out we hide that lock from lockdep for hysterical raisins (read:
historical reasons), which results in the assertion always firing.
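
For context (not part of the commit message): under CONFIG_LOCKDEP the
assertion expands to roughly the following (simplified from
<linux/lockdep.h> of this era):

  #define lockdep_assert_held(l)  do {                            \
          WARN_ON(debug_locks && !lockdep_is_held(l));            \
  } while (0)

The old spin_lock_mutex() wrapper took the raw lock with
arch_spin_lock(), which never calls lock_acquire(), so lockdep had no
record of wait_lock being held; lockdep_is_held() therefore returned
false and the WARN_ON() tripped on every call.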

Remove the old debug code, since lockdep will easily spot the abuse it
was meant to catch. Removing it makes the lock visible to lockdep and
makes the assertion work as intended.
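
A minimal before/after sketch (illustrative only; in the tree, the
assertion sits at the top of __ww_mutex_wakeup_for_backoff(), added by
659cf9f5824a):

  /* Before: the debug wrapper bypasses lockdep via arch_spin_lock(). */
  spin_lock_mutex(&lock->wait_lock, flags);
  lockdep_assert_held(&lock->wait_lock);      /* always fires */
  spin_unlock_mutex(&lock->wait_lock, flags);

  /* After: the plain API calls lock_acquire() when CONFIG_LOCKDEP=y. */
  spin_lock(&lock->wait_lock);
  lockdep_assert_held(&lock->wait_lock);      /* passes as intended */
  spin_unlock(&lock->wait_lock);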

Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicolai Haehnle <Nicolai.Haehnle@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 659cf9f5824a ("locking/ww_mutex: Optimize ww-mutexes by waking at most one waiter for backoff when acquiring the lock")
Link: http://lkml.kernel.org/r/20170117150609.GB32474@worktop
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/mutex-debug.h
kernel/locking/mutex.c
kernel/locking/mutex.h

diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index a459faa4898738e1121c19509a735fce072418e4..4174417d53094d55c3e89ef08e6f360c6b41557f 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -26,20 +26,3 @@ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
                             struct lock_class_key *key);
-
-#define spin_lock_mutex(lock, flags)                   \
-       do {                                            \
-               struct mutex *l = container_of(lock, struct mutex, wait_lock); \
-                                                       \
-               DEBUG_LOCKS_WARN_ON(in_interrupt());    \
-               local_irq_save(flags);                  \
-               arch_spin_lock(&(lock)->rlock.raw_lock);\
-               DEBUG_LOCKS_WARN_ON(l->magic != l);     \
-       } while (0)
-
-#define spin_unlock_mutex(lock, flags)                         \
-       do {                                                    \
-               arch_spin_unlock(&(lock)->rlock.raw_lock);      \
-               local_irq_restore(flags);                       \
-               preempt_check_resched();                        \
-       } while (0)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 935116723a3d4c06fa5c63369c545a8fdbb07033..705e06fe5e6c4d6c0de276d7d45efe60d66f5012 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -325,8 +325,6 @@ __ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 static __always_inline void
 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-       unsigned long flags;
-
        ww_mutex_lock_acquired(lock, ctx);
 
        lock->ctx = ctx;
@@ -350,9 +348,9 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
         * Uh oh, we raced in fastpath, wake up everyone in this case,
         * so they can see the new lock->ctx.
         */
-       spin_lock_mutex(&lock->base.wait_lock, flags);
+       spin_lock(&lock->base.wait_lock);
        __ww_mutex_wakeup_for_backoff(&lock->base, ctx);
-       spin_unlock_mutex(&lock->base.wait_lock, flags);
+       spin_unlock(&lock->base.wait_lock);
 }
 
 /*
@@ -740,7 +738,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
        struct mutex_waiter waiter;
-       unsigned long flags;
        bool first = false;
        struct ww_mutex *ww;
        int ret;
@@ -766,7 +763,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                return 0;
        }
 
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
        /*
         * After waiting to acquire the wait_lock, try again.
         */
@@ -830,7 +827,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                                goto err;
                }
 
-               spin_unlock_mutex(&lock->wait_lock, flags);
+               spin_unlock(&lock->wait_lock);
                schedule_preempt_disabled();
 
                /*
@@ -853,9 +850,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
                        break;
 
-               spin_lock_mutex(&lock->wait_lock, flags);
+               spin_lock(&lock->wait_lock);
        }
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
 acquired:
        __set_current_state(TASK_RUNNING);
 
@@ -872,7 +869,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        if (use_ww_ctx && ww_ctx)
                ww_mutex_set_context_slowpath(ww, ww_ctx);
 
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
        preempt_enable();
        return 0;
 
@@ -880,7 +877,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        __set_current_state(TASK_RUNNING);
        mutex_remove_waiter(lock, &waiter, current);
 err_early_backoff:
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
@@ -999,8 +996,8 @@ EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
 {
        struct task_struct *next = NULL;
-       unsigned long owner, flags;
        DEFINE_WAKE_Q(wake_q);
+       unsigned long owner;
 
        mutex_release(&lock->dep_map, 1, ip);
 
@@ -1035,7 +1032,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
                owner = old;
        }
 
-       spin_lock_mutex(&lock->wait_lock, flags);
+       spin_lock(&lock->wait_lock);
        debug_mutex_unlock(lock);
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
@@ -1052,7 +1049,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);
 
-       spin_unlock_mutex(&lock->wait_lock, flags);
+       spin_unlock(&lock->wait_lock);
 
        wake_up_q(&wake_q);
 }
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 4410a4af42a338d5a826fb20c67cc4eba31034f8..6ebc1902f779fe6d89cc7fe4fe1f78c10610eed6 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -9,10 +9,6 @@
  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
  */
 
-#define spin_lock_mutex(lock, flags) \
-               do { spin_lock(lock); (void)(flags); } while (0)
-#define spin_unlock_mutex(lock, flags) \
-               do { spin_unlock(lock); (void)(flags); } while (0)
 #define mutex_remove_waiter(lock, waiter, task) \
                __list_del((waiter)->list.prev, (waiter)->list.next)