asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/i915: Rename helpers used for unwinding, use macro for can_preempt
authorMichał Winiarski <michal.winiarski@intel.com>
Wed, 25 Oct 2017 20:00:18 +0000 (22:00 +0200)
committerChris Wilson <chris@chris-wilson.co.uk>
Thu, 26 Oct 2017 20:35:21 +0000 (21:35 +0100)
We would also like to make use of execlist_cancel_port_requests and
unwind_incomplete_requests in GuC preemption backend.
Let's rename the functions to use the correct prefixes, so that we can
simply add the declarations in the following patch.
A similar thing applies for can_preempt, except we're introducing the
HAS_LOGICAL_RING_PREEMPTION macro instead, converting other users that
were previously touching device info directly.

v2: s/intel_engine/execlists and pass execlists to unwind (Chris)
v3: use locked version for exporting, drop const qual (Chris)

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171025200020.16636-11-michal.winiarski@intel.com
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_lrc.c

index 3db5851756f060fd23241aa4dd448f4d71530368..7b871802ae3608bee58d495eb060701bb49cc083 100644 (file)
@@ -372,7 +372,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                        value |= I915_SCHEDULER_CAP_ENABLED;
                        value |= I915_SCHEDULER_CAP_PRIORITY;
 
-                       if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
+                       if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
                            i915_modparams.enable_execlists &&
                            !i915_modparams.enable_guc_submission)
                                value |= I915_SCHEDULER_CAP_PREEMPTION;
index 366ba74b0ad284818efd51e60ad1343fc0132c98..61c155cbf9d71f28554991f5bb01a294a798a797 100644 (file)
@@ -3140,6 +3140,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
                ((dev_priv)->info.has_logical_ring_contexts)
+#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
+               ((dev_priv)->info.has_logical_ring_preemption)
 #define USES_PPGTT(dev_priv)           (i915_modparams.enable_ppgtt)
 #define USES_FULL_PPGTT(dev_priv)      (i915_modparams.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev_priv)        (i915_modparams.enable_ppgtt == 3)
index fedb839dff6111768dd14dfd0a917191db1dfee4..3ac876ca6cae2f1b900b27280c251d8e44201a1e 100644 (file)
@@ -620,7 +620,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
         * Similarly the preempt context must always be available so that
         * we can interrupt the engine at any time.
         */
-       if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+       if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
                ring = engine->context_pin(engine,
                                           engine->i915->preempt_context);
                if (IS_ERR(ring)) {
@@ -651,7 +651,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 err_breadcrumbs:
        intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
-       if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+       if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
                engine->context_unpin(engine, engine->i915->preempt_context);
 err_unpin_kernel:
        engine->context_unpin(engine, engine->i915->kernel_context);
@@ -679,7 +679,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        intel_engine_cleanup_cmd_parser(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);
 
-       if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+       if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
                engine->context_unpin(engine, engine->i915->preempt_context);
        engine->context_unpin(engine, engine->i915->kernel_context);
 }
index 599c709fc5a76f145694a99be7cd8e135775a429..b5d382ef8d85b5f7e7b5fb5b3375449d1f1062a4 100644 (file)
@@ -354,7 +354,7 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
        assert_ring_tail_valid(rq->ring, rq->tail);
 }
 
-static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *rq, *rn;
        struct i915_priolist *uninitialized_var(p);
@@ -385,6 +385,17 @@ static void unwind_incomplete_requests(struct intel_engine_cs *engine)
        }
 }
 
+static void
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+       struct intel_engine_cs *engine =
+               container_of(execlists, typeof(*engine), execlists);
+
+       spin_lock_irq(&engine->timeline->lock);
+       __unwind_incomplete_requests(engine);
+       spin_unlock_irq(&engine->timeline->lock);
+}
+
 static inline void
 execlists_context_status_change(struct drm_i915_gem_request *rq,
                                unsigned long status)
@@ -515,11 +526,6 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
        elsp_write(ce->lrc_desc, elsp);
 }
 
-static bool can_preempt(struct intel_engine_cs *engine)
-{
-       return INTEL_INFO(engine->i915)->has_logical_ring_preemption;
-}
-
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -567,7 +573,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                if (port_count(&port[0]) > 1)
                        goto unlock;
 
-               if (can_preempt(engine) &&
+               if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
                    rb_entry(rb, struct i915_priolist, node)->priority >
                    max(last->priotree.priority, 0)) {
                        /*
@@ -691,7 +697,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 }
 
 static void
-execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 {
        struct execlist_port *port = execlists->port;
        unsigned int num_ports = execlists_num_ports(execlists);
@@ -718,7 +724,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        spin_lock_irqsave(&engine->timeline->lock, flags);
 
        /* Cancel the requests on the HW and clear the ELSP tracker. */
-       execlist_cancel_port_requests(execlists);
+       execlists_cancel_port_requests(execlists);
 
        /* Mark all executing requests as skipped. */
        list_for_each_entry(rq, &engine->timeline->requests, link) {
@@ -858,11 +864,8 @@ static void intel_lrc_irq_handler(unsigned long data)
 
                        if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
                            buf[2*head + 1] == PREEMPT_ID) {
-                               execlist_cancel_port_requests(execlists);
-
-                               spin_lock_irq(&engine->timeline->lock);
-                               unwind_incomplete_requests(engine);
-                               spin_unlock_irq(&engine->timeline->lock);
+                               execlists_cancel_port_requests(execlists);
+                               execlists_unwind_incomplete_requests(execlists);
 
                                GEM_BUG_ON(!execlists_is_active(execlists,
                                                                EXECLISTS_ACTIVE_PREEMPT));
@@ -1531,10 +1534,10 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         * guessing the missed context-switch events by looking at what
         * requests were completed.
         */
-       execlist_cancel_port_requests(execlists);
+       execlists_cancel_port_requests(execlists);
 
        /* Push back any incomplete requests for replay after the reset. */
-       unwind_incomplete_requests(engine);
+       __unwind_incomplete_requests(engine);
 
        spin_unlock_irqrestore(&engine->timeline->lock, flags);