drm/i915: Use seqlock in engine stats
Author:     Tvrtko Ursulin <tvrtko.ursulin@intel.com>
AuthorDate: Thu, 26 Apr 2018 07:47:16 +0000 (08:47 +0100)
Commit:     Tvrtko Ursulin <tvrtko.ursulin@intel.com>
CommitDate: Thu, 26 Apr 2018 09:10:05 +0000 (10:10 +0100)
We can convert the engine stats from a spinlock to a seqlock to ensure that
interrupt processing is never delayed, even slightly, by parallel readers.

There is a smidgen more cost on the write lock side, and a chance that
readers will have to retry a few times in the face of heavy interrupt load.
But retries should be extremely unlikely given how lightweight the read-side
section is compared to the interrupt processing side, and also compared to
the rest of the code paths which can lead into it. Furthermore, the writers
are the ones doing the real, latency-sensitive work, while the readers are
only informative.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426074716.7352-1-tvrtko.ursulin@linux.intel.com
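
To make the tradeoff concrete, below is a minimal sketch of the seqlock
pattern this patch adopts, written against the kernel's <linux/seqlock.h>
API. The stats_lock/stats_total variables and the stats_init()/
stats_update()/stats_read() helpers are hypothetical stand-ins for
engine->stats.lock and engine->stats.total, not code from the patch itself:

    #include <linux/seqlock.h>
    #include <linux/ktime.h>

    /* Hypothetical stand-ins for engine->stats.lock and engine->stats.total. */
    static seqlock_t stats_lock;
    static ktime_t stats_total;

    static void stats_init(void)
    {
            seqlock_init(&stats_lock);
    }

    /*
     * Writer (e.g. context switch accounting in interrupt context):
     * takes the exclusive write lock, bumping the sequence count, but
     * is never delayed by concurrent readers.
     */
    static void stats_update(ktime_t delta)
    {
            unsigned long flags;

            write_seqlock_irqsave(&stats_lock, flags);
            stats_total = ktime_add(stats_total, delta);
            write_sequnlock_irqrestore(&stats_lock, flags);
    }

    /*
     * Reader (e.g. the informative busyness query): runs locklessly
     * and simply retries its read section if a writer raced with it.
     */
    static ktime_t stats_read(void)
    {
            unsigned int seq;
            ktime_t total;

            do {
                    seq = read_seqbegin(&stats_lock);
                    total = stats_total;
            } while (read_seqretry(&stats_lock, seq));

            return total;
    }

Compared with the previous spinlock, readers no longer hold writers back at
all; the cost of contention is shifted entirely onto the (rare) reader retry
loop, matching the rationale of keeping the latency-sensitive write side fast.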
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 58be7fac5b8c1c4ae0a95a698ec27d0b94cffacc..ac009f10c948392aef69301542e0ba82a548a609 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -306,7 +306,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
        /* Nothing to do here, execute in order of dependencies */
        engine->schedule = NULL;
 
-       spin_lock_init(&engine->stats.lock);
+       seqlock_init(&engine->stats.lock);
 
        ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
 
@@ -1481,7 +1481,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
                return -ENODEV;
 
        tasklet_disable(&execlists->tasklet);
-       spin_lock_irqsave(&engine->stats.lock, flags);
+       write_seqlock_irqsave(&engine->stats.lock, flags);
 
        if (unlikely(engine->stats.enabled == ~0)) {
                err = -EBUSY;
@@ -1505,7 +1505,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
        }
 
 unlock:
-       spin_unlock_irqrestore(&engine->stats.lock, flags);
+       write_sequnlock_irqrestore(&engine->stats.lock, flags);
        tasklet_enable(&execlists->tasklet);
 
        return err;
@@ -1534,12 +1534,13 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
  */
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
 {
+       unsigned int seq;
        ktime_t total;
-       unsigned long flags;
 
-       spin_lock_irqsave(&engine->stats.lock, flags);
-       total = __intel_engine_get_busy_time(engine);
-       spin_unlock_irqrestore(&engine->stats.lock, flags);
+       do {
+               seq = read_seqbegin(&engine->stats.lock);
+               total = __intel_engine_get_busy_time(engine);
+       } while (read_seqretry(&engine->stats.lock, seq));
 
        return total;
 }
@@ -1557,13 +1558,13 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
        if (!intel_engine_supports_stats(engine))
                return;
 
-       spin_lock_irqsave(&engine->stats.lock, flags);
+       write_seqlock_irqsave(&engine->stats.lock, flags);
        WARN_ON_ONCE(engine->stats.enabled == 0);
        if (--engine->stats.enabled == 0) {
                engine->stats.total = __intel_engine_get_busy_time(engine);
                engine->stats.active = 0;
        }
-       spin_unlock_irqrestore(&engine->stats.lock, flags);
+       write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c5e27905b0e1ea264950873b4e47c52447a77666..24af3f1088ba440dc2278fa780408ef53df5751b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -3,6 +3,7 @@
 #define _INTEL_RINGBUFFER_H_
 
 #include <linux/hashtable.h>
+#include <linux/seqlock.h>
 
 #include "i915_gem_batch_pool.h"
 #include "i915_gem_timeline.h"
@@ -595,7 +596,7 @@ struct intel_engine_cs {
                /**
                 * @lock: Lock protecting the below fields.
                 */
-               spinlock_t lock;
+               seqlock_t lock;
                /**
                 * @enabled: Reference count indicating number of listeners.
                 */
@@ -1064,7 +1065,7 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine)
        if (READ_ONCE(engine->stats.enabled) == 0)
                return;
 
-       spin_lock_irqsave(&engine->stats.lock, flags);
+       write_seqlock_irqsave(&engine->stats.lock, flags);
 
        if (engine->stats.enabled > 0) {
                if (engine->stats.active++ == 0)
@@ -1072,7 +1073,7 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine)
                GEM_BUG_ON(engine->stats.active == 0);
        }
 
-       spin_unlock_irqrestore(&engine->stats.lock, flags);
+       write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
 static inline void intel_engine_context_out(struct intel_engine_cs *engine)
@@ -1082,7 +1083,7 @@ static inline void intel_engine_context_out(struct intel_engine_cs *engine)
        if (READ_ONCE(engine->stats.enabled) == 0)
                return;
 
-       spin_lock_irqsave(&engine->stats.lock, flags);
+       write_seqlock_irqsave(&engine->stats.lock, flags);
 
        if (engine->stats.enabled > 0) {
                ktime_t last;
@@ -1109,7 +1110,7 @@ static inline void intel_engine_context_out(struct intel_engine_cs *engine)
                }
        }
 
-       spin_unlock_irqrestore(&engine->stats.lock, flags);
+       write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
 int intel_enable_engine_stats(struct intel_engine_cs *engine);