locking/rwsem: Always release wait_lock before waking up tasks
author    Waiman Long <longman@redhat.com>
          Mon, 20 May 2019 20:59:07 +0000 (16:59 -0400)
committer Ingo Molnar <mingo@kernel.org>
          Mon, 17 Jun 2019 10:28:00 +0000 (12:28 +0200)
With the use of wake_q, we can do task wakeups without holding the
wait_lock. There is one exception in the rwsem code, though. It is
when the writer in the slowpath detects that there are waiters ahead
but the rwsem is not held by a writer. This can lead to a long wait_lock
hold time especially when a large number of readers are to be woken up.

Remediate this situation by releasing the wait_lock before waking
up tasks and re-acquiring it afterward. The rwsem_try_write_lock()
function is also modified to read the rwsem count directly to avoid
acting on a stale count value.
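
For illustration, a condensed sketch of the wake-up pattern this change
introduces, mirroring the rwsem.c hunk below (the enclosing function and
the 'sem' variable are assumed context; wait_lock and the wake_q helpers,
including the wake_q_empty() added here, are the real kernel API):

	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irq(&sem->wait_lock);
	/* ... rwsem_mark_wake() queues the readers to be woken into wake_q ... */
	if (!wake_q_empty(&wake_q)) {
		/* Wake N readers in O(N) time without holding wait_lock. */
		raw_spin_unlock_irq(&sem->wait_lock);
		wake_up_q(&wake_q);
		wake_q_init(&wake_q);	/* a wake_q is single-use; reinit */
		raw_spin_lock_irq(&sem->wait_lock);
	}
	/* ... retry the write-lock attempt under wait_lock ... */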

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: huang ying <huang.ying.caritas@gmail.com>
Link: https://lkml.kernel.org/r/20190520205918.22251-9-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/sched/wake_q.h
kernel/locking/rwsem.c

index ad826d2a4557dea643c18e2c160c6d1f48650431..26a2013ac39c4480784a87297d72f02ae120a7cf 100644 (file)
@@ -51,6 +51,11 @@ static inline void wake_q_init(struct wake_q_head *head)
        head->lastp = &head->first;
 }
 
+static inline bool wake_q_empty(struct wake_q_head *head)
+{
+       return head->first == WAKE_Q_TAIL;
+}
+
 extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
 extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
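
A brief usage sketch of where the new predicate fits (the 'task' variable
is illustrative; the helpers are the API declared in this header):

	DEFINE_WAKE_Q(wake_q);		/* head->first starts at WAKE_Q_TAIL */

	if (task)
		wake_q_add(&wake_q, task);	/* defer the actual wakeup */

	if (!wake_q_empty(&wake_q)) {	/* skip the wakeups if nothing queued */
		wake_up_q(&wake_q);
		wake_q_init(&wake_q);	/* wake_up_q() does not reset the head */
	}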
index decda9fb8c6d376fd2b6d0412db5c399a360d05d..5532304406f7ad38a672158d8acae7b4a3846378 100644 (file)
@@ -400,13 +400,14 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
  * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
  * bit is set or the lock is acquired with handoff bit cleared.
  */
-static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem,
+static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
                                        enum writer_wait_state wstate)
 {
-       long new;
+       long count, new;
 
        lockdep_assert_held(&sem->wait_lock);
 
+       count = atomic_long_read(&sem->count);
        do {
                bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
 
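
Reading the count inside rwsem_try_write_lock() matters because, with this
patch, the caller may have dropped and re-taken wait_lock since it last
sampled sem->count. A condensed sketch of the resulting shape (the handoff
and waiter-bit handling of the real function is elided):

	/* Condensed sketch; not the full upstream function. */
	static inline bool try_write_lock_sketch(struct rw_semaphore *sem)
	{
		long count, new;

		lockdep_assert_held(&sem->wait_lock);

		count = atomic_long_read(&sem->count);	/* fresh, not caller-supplied */
		do {
			if (count & RWSEM_LOCK_MASK)	/* still read- or write-locked */
				return false;
			new = count | RWSEM_WRITER_LOCKED;
			/* On failure, try_cmpxchg reloads 'count' and the loop retries. */
		} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

		return true;
	}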
@@ -751,26 +752,25 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
                                        ? RWSEM_WAKE_READERS
                                        : RWSEM_WAKE_ANY, &wake_q);
 
-               /*
-                * The wakeup is normally called _after_ the wait_lock
-                * is released, but given that we are proactively waking
-                * readers we can deal with the wake_q overhead as it is
-                * similar to releasing and taking the wait_lock again
-                * for attempting rwsem_try_write_lock().
-                */
-               wake_up_q(&wake_q);
-
-               /* We need wake_q again below, reinitialize */
-               wake_q_init(&wake_q);
+               if (!wake_q_empty(&wake_q)) {
+                       /*
+                        * We want to minimize wait_lock hold time especially
+                        * when a large number of readers are to be woken up.
+                        */
+                       raw_spin_unlock_irq(&sem->wait_lock);
+                       wake_up_q(&wake_q);
+                       wake_q_init(&wake_q);   /* Used again, reinit */
+                       raw_spin_lock_irq(&sem->wait_lock);
+               }
        } else {
-               count = atomic_long_add_return(RWSEM_FLAG_WAITERS, &sem->count);
+               atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
        }
 
 wait:
        /* wait until we successfully acquire the lock */
        set_current_state(state);
        while (true) {
-               if (rwsem_try_write_lock(count, sem, wstate))
+               if (rwsem_try_write_lock(sem, wstate))
                        break;
 
                raw_spin_unlock_irq(&sem->wait_lock);
@@ -811,7 +811,6 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
                }
 
                raw_spin_lock_irq(&sem->wait_lock);
-               count = atomic_long_read(&sem->count);
        }
        __set_current_state(TASK_RUNNING);
        list_del(&waiter.list);
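
A follow-on simplification visible in the hunk above: since
rwsem_try_write_lock() now samples sem->count itself, the slowpath no
longer consumes the value returned when setting RWSEM_FLAG_WAITERS, so the
non-value-returning atomic suffices:

	/* Before: value-returning RMW, result fed to rwsem_try_write_lock(). */
	count = atomic_long_add_return(RWSEM_FLAG_WAITERS, &sem->count);

	/* After: plain RMW; rwsem_try_write_lock() re-reads sem->count itself. */
	atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);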