locking/rwsem: Relocate rwsem_down_read_failed()
author		Waiman Long <longman@redhat.com>
		Thu, 4 Apr 2019 17:43:10 +0000 (13:43 -0400)
committer	Ingo Molnar <mingo@kernel.org>
		Wed, 10 Apr 2019 08:55:59 +0000 (10:55 +0200)
The rwsem_down_read_failed*() functions were relocated from above the
optimistic spinning section to below that section. This enables the
reader functions to use optimistic spinning in future patches. There
is no code change.

Signed-off-by: Waiman Long <longman@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Link: http://lkml.kernel.org/r/20190404174320.22416-2-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
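
The message above frames this purely as preparation: once the reader slowpath sits below the optimistic-spinning section, a later patch can call the spinning helpers from __rwsem_down_read_failed_common() without forward declarations. As a rough model of readers using optimistic spinning, the user-space C sketch below (not the kernel code; my_rwsem, try_read_fast() and block_on_waitqueue() are invented names, and the spin bound is arbitrary) lets a failing reader spin for a bounded time while the current owner is still running before it commits to sleeping.

/*
 * Simplified user-space model of reader optimistic spinning.  Every name
 * here is hypothetical; this is an illustration, not the rwsem code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

struct my_rwsem {
        atomic_long count;          /* >= 0: free or reader-held, < 0: writer active */
        atomic_bool owner_running;  /* true while the current owner is on a CPU */
};

/* One-shot read-lock attempt, a stand-in for the rwsem fast path. */
static bool try_read_fast(struct my_rwsem *sem)
{
        long c = atomic_load(&sem->count);

        return c >= 0 && atomic_compare_exchange_strong(&sem->count, &c, c + 1);
}

/* Stand-in for queueing on the wait list and sleeping until woken. */
static void block_on_waitqueue(struct my_rwsem *sem)
{
        while (!try_read_fast(sem))
                sched_yield();
}

void down_read_slowpath(struct my_rwsem *sem)
{
        /* Optimistic spin: keep retrying while the owner is still running. */
        for (int i = 0; i < 1024 && atomic_load(&sem->owner_running); i++) {
                if (try_read_fast(sem))
                        return;          /* got the read lock without sleeping */
                sched_yield();           /* stand-in for cpu_relax() */
        }

        block_on_waitqueue(sem);         /* fall back to the sleeping path */
}

Spinning only while the owner is on a CPU keeps the wasted cycles bounded: if the owner is preempted or asleep, the reader falls through to the wait-queue path at once, which is what the relocated functions below implement.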
kernel/locking/rwsem-xadd.c

diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index fbe96341beeed2c37f84526cb70adbe1b8734156..0d518b52ade4e87c24abf484b2c52ca6b7244bfc 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -224,92 +224,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
                atomic_long_add(adjustment, &sem->count);
 }
 
-/*
- * Wait for the read lock to be granted
- */
-static inline struct rw_semaphore __sched *
-__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
-{
-       long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
-       struct rwsem_waiter waiter;
-       DEFINE_WAKE_Q(wake_q);
-
-       waiter.task = current;
-       waiter.type = RWSEM_WAITING_FOR_READ;
-
-       raw_spin_lock_irq(&sem->wait_lock);
-       if (list_empty(&sem->wait_list)) {
-               /*
-                * In case the wait queue is empty and the lock isn't owned
-                * by a writer, this reader can exit the slowpath and return
-                * immediately as its RWSEM_ACTIVE_READ_BIAS has already
-                * been set in the count.
-                */
-               if (atomic_long_read(&sem->count) >= 0) {
-                       raw_spin_unlock_irq(&sem->wait_lock);
-                       return sem;
-               }
-               adjustment += RWSEM_WAITING_BIAS;
-       }
-       list_add_tail(&waiter.list, &sem->wait_list);
-
-       /* we're now waiting on the lock, but no longer actively locking */
-       count = atomic_long_add_return(adjustment, &sem->count);
-
-       /*
-        * If there are no active locks, wake the front queued process(es).
-        *
-        * If there are no writers and we are first in the queue,
-        * wake our own waiter to join the existing active readers !
-        */
-       if (count == RWSEM_WAITING_BIAS ||
-           (count > RWSEM_WAITING_BIAS &&
-            adjustment != -RWSEM_ACTIVE_READ_BIAS))
-               __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
-
-       raw_spin_unlock_irq(&sem->wait_lock);
-       wake_up_q(&wake_q);
-
-       /* wait to be given the lock */
-       while (true) {
-               set_current_state(state);
-               if (!waiter.task)
-                       break;
-               if (signal_pending_state(state, current)) {
-                       raw_spin_lock_irq(&sem->wait_lock);
-                       if (waiter.task)
-                               goto out_nolock;
-                       raw_spin_unlock_irq(&sem->wait_lock);
-                       break;
-               }
-               schedule();
-       }
-
-       __set_current_state(TASK_RUNNING);
-       return sem;
-out_nolock:
-       list_del(&waiter.list);
-       if (list_empty(&sem->wait_list))
-               atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
-       raw_spin_unlock_irq(&sem->wait_lock);
-       __set_current_state(TASK_RUNNING);
-       return ERR_PTR(-EINTR);
-}
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-       return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed);
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed_killable(struct rw_semaphore *sem)
-{
-       return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed_killable);
-
 /*
  * This function must be called with the sem->wait_lock held to prevent
  * race conditions between checking the rwsem wait list and setting the
@@ -504,6 +418,92 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
 }
 #endif
 
+/*
+ * Wait for the read lock to be granted
+ */
+static inline struct rw_semaphore __sched *
+__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
+{
+       long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
+       struct rwsem_waiter waiter;
+       DEFINE_WAKE_Q(wake_q);
+
+       waiter.task = current;
+       waiter.type = RWSEM_WAITING_FOR_READ;
+
+       raw_spin_lock_irq(&sem->wait_lock);
+       if (list_empty(&sem->wait_list)) {
+               /*
+                * In case the wait queue is empty and the lock isn't owned
+                * by a writer, this reader can exit the slowpath and return
+                * immediately as its RWSEM_ACTIVE_READ_BIAS has already
+                * been set in the count.
+                */
+               if (atomic_long_read(&sem->count) >= 0) {
+                       raw_spin_unlock_irq(&sem->wait_lock);
+                       return sem;
+               }
+               adjustment += RWSEM_WAITING_BIAS;
+       }
+       list_add_tail(&waiter.list, &sem->wait_list);
+
+       /* we're now waiting on the lock, but no longer actively locking */
+       count = atomic_long_add_return(adjustment, &sem->count);
+
+       /*
+        * If there are no active locks, wake the front queued process(es).
+        *
+        * If there are no writers and we are first in the queue,
+        * wake our own waiter to join the existing active readers !
+        */
+       if (count == RWSEM_WAITING_BIAS ||
+           (count > RWSEM_WAITING_BIAS &&
+            adjustment != -RWSEM_ACTIVE_READ_BIAS))
+               __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+
+       raw_spin_unlock_irq(&sem->wait_lock);
+       wake_up_q(&wake_q);
+
+       /* wait to be given the lock */
+       while (true) {
+               set_current_state(state);
+               if (!waiter.task)
+                       break;
+               if (signal_pending_state(state, current)) {
+                       raw_spin_lock_irq(&sem->wait_lock);
+                       if (waiter.task)
+                               goto out_nolock;
+                       raw_spin_unlock_irq(&sem->wait_lock);
+                       break;
+               }
+               schedule();
+       }
+
+       __set_current_state(TASK_RUNNING);
+       return sem;
+out_nolock:
+       list_del(&waiter.list);
+       if (list_empty(&sem->wait_list))
+               atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+       raw_spin_unlock_irq(&sem->wait_lock);
+       __set_current_state(TASK_RUNNING);
+       return ERR_PTR(-EINTR);
+}
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+       return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed);
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed_killable(struct rw_semaphore *sem)
+{
+       return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed_killable);
+
 /*
  * Wait until we successfully acquire the write lock
  */