drm/i915/execlists: Track active elements during dequeue
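
For orientation, a minimal, self-contained sketch (plain C, compilable on its own) of the pattern this patch open-codes in execlists_dequeue(): rather than re-deriving the last active request through the removed last_active() helper, the dequeue path walks execlists->active inline, skipping completed requests, and keeps the cursor around for later reuse (the memcmp() against the pending set and the set_preempt_timeout() call). The request type and helper below are simplified stand-ins for illustration, not the driver's real structures.

/*
 * Simplified sketch, not the kernel code: walk a NULL-terminated array of
 * in-flight requests, skip the completed ones, and keep both the cursor
 * ("active") and the first incomplete request ("last") for later reuse.
 */
#include <stdbool.h>
#include <stdio.h>

struct request {
	int seqno;
	bool completed;
};

static bool request_completed(const struct request *rq)
{
	return rq->completed;
}

int main(void)
{
	struct request r0 = { .seqno = 1, .completed = true };
	struct request r1 = { .seqno = 2, .completed = false };
	/* NULL-terminated array standing in for execlists->active */
	struct request *ports[] = { &r0, &r1, NULL };

	struct request * const *active = ports;
	struct request *last;

	/* Advance past completed requests; the cursor is kept, not recomputed */
	while ((last = *active) && request_completed(last))
		active++;

	if (last)
		printf("last active request: seqno %d\n", last->seqno);
	else
		printf("no active request\n");

	/*
	 * 'active' (and *active) remain available later in the same pass,
	 * e.g. for comparison against a pending set or as an argument to a
	 * timeout helper, mirroring what the real dequeue path does.
	 */
	return 0;
}
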
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index fe8a59aaa629a1bf8d01367c796307234302bdac..5e8928edf376166282312f9589baab06735c733d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1600,17 +1600,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
        spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-       struct i915_request * const *last = READ_ONCE(execlists->active);
-
-       while (*last && i915_request_completed(*last))
-               last++;
-
-       return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
        list_for_each_entry_lockless(p__, \
                                     &(rq__)->sched.waiters_list, \
@@ -1679,11 +1668,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
        if (!intel_engine_has_timeslices(engine))
                return false;
 
-       if (list_is_last(&rq->sched.link, &engine->active.requests))
-               return false;
-
-       hint = max(rq_prio(list_next_entry(rq, sched.link)),
-                  engine->execlists.queue_priority_hint);
+       hint = engine->execlists.queue_priority_hint;
+       if (!list_is_last(&rq->sched.link, &engine->active.requests))
+               hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
        return hint >= effective_prio(rq);
 }
@@ -1725,16 +1712,26 @@ static void set_timeslice(struct intel_engine_cs *engine)
        set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
 }
 
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists *execlists = &engine->execlists;
+
+       execlists->switch_priority_hint = execlists->queue_priority_hint;
+
+       if (timer_pending(&execlists->timer))
+               return;
+
+       set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
 static void record_preemption(struct intel_engine_execlists *execlists)
 {
        (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+                                           const struct i915_request *rq)
 {
-       struct i915_request *rq;
-
-       rq = last_active(&engine->execlists);
        if (!rq)
                return 0;
 
@@ -1745,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
        return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+                               const struct i915_request *rq)
 {
        if (!intel_engine_has_preempt_reset(engine))
                return;
 
        set_timer_ms(&engine->execlists.preempt,
-                    active_preempt_timeout(engine));
+                    active_preempt_timeout(engine, rq));
 }
 
 static inline void clear_ports(struct i915_request **ports, int count)
@@ -1764,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_request **port = execlists->pending;
        struct i915_request ** const last_port = port + execlists->port_mask;
+       struct i915_request * const *active;
        struct i915_request *last;
        struct rb_node *rb;
        bool submit = false;
@@ -1818,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         * i.e. we will retrigger preemption following the ack in case
         * of trouble.
         */
-       last = last_active(execlists);
+       active = READ_ONCE(execlists->active);
+       while ((last = *active) && i915_request_completed(last))
+               active++;
+
        if (last) {
                if (need_preempt(engine, last, rb)) {
                        ENGINE_TRACE(engine,
@@ -1888,11 +1890,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * Even if ELSP[1] is occupied and not worthy
                                 * of timeslices, our queue might be.
                                 */
-                               if (!execlists->timer.expires &&
-                                   need_timeslice(engine, last))
-                                       set_timer_ms(&execlists->timer,
-                                                    timeslice(engine));
-
+                               start_timeslice(engine);
                                return;
                        }
                }
@@ -1927,7 +1925,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                        if (last && !can_merge_rq(last, rq)) {
                                spin_unlock(&ve->base.active.lock);
-                               return; /* leave this for another */
+                               start_timeslice(engine);
+                               return; /* leave this for another sibling */
                        }
 
                        ENGINE_TRACE(engine,
@@ -2103,7 +2102,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                 * Skip if we ended up with exactly the same set of requests,
                 * e.g. trying to timeslice a pair of ordered contexts
                 */
-               if (!memcmp(execlists->active, execlists->pending,
+               if (!memcmp(active, execlists->pending,
                            (port - execlists->pending + 1) * sizeof(*port))) {
                        do
                                execlists_schedule_out(fetch_and_zero(port));
@@ -2114,7 +2113,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                clear_ports(port + 1, last_port - port);
 
                execlists_submit_ports(engine);
-               set_preempt_timeout(engine);
+               set_preempt_timeout(engine, *active);
        } else {
 skip_submit:
                ring_set_paused(engine, 0);