drm/i915/execlists: Convert recursive defer_request() into iterative
author    Chris Wilson <chris@chris-wilson.co.uk>
          Tue, 25 Jun 2019 13:01:09 +0000 (14:01 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Tue, 25 Jun 2019 19:17:22 +0000 (20:17 +0100)
As this engine owns the lock around rq->sched.link (for those waiters
submitted to it), we can use that link as an element in a local list.
We can thus replace the recursive algorithm with an iterative walk over
the ordered list of waiters.
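
The pattern is easier to see outside the driver. Below is a minimal,
self-contained userspace sketch of the same conversion. struct request,
its waiters[] array, and the toy list helpers are illustrative stand-ins
(the kernel uses <linux/list.h> and the i915 scheduler types), and the
engine/priority filtering and GEM_BUG_ON assertions of the real
defer_request() are omitted. Only the list discipline mirrors the patch:
move the deferred request to the back of the priority list, collect its
waiters on a local list, and drain that list iteratively instead of
recursing.

    #include <stddef.h>
    #include <stdio.h>

    /* Toy intrusive list, mimicking the relevant parts of <linux/list.h>. */
    struct list_head {
            struct list_head *prev, *next;
    };

    #define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

    static void list_add_tail(struct list_head *node, struct list_head *head)
    {
            node->prev = head->prev;
            node->next = head;
            head->prev->next = node;
            head->prev = node;
    }

    static void list_move_tail(struct list_head *node, struct list_head *head)
    {
            /* unlink from whichever list currently holds the node... */
            node->prev->next = node->next;
            node->next->prev = node->prev;
            /* ...and relink it at the tail of @head */
            list_add_tail(node, head);
    }

    /* Illustrative stand-in for i915_request: a name, a link, direct waiters. */
    struct request {
            const char *name;
            struct list_head link;      /* element in whichever list owns us */
            struct request *waiters[4]; /* direct dependents (toy, fixed size) */
            int nr_waiters;
    };

    #define to_request(ptr) \
            ((struct request *)((char *)(ptr) - offsetof(struct request, link)))

    /*
     * Move @rq to the back of @pl, then do the same for each transitive
     * waiter.  Instead of recursing into every waiter, queue them on a
     * local list and keep draining it until empty.  Assumes an acyclic
     * dependency graph, as the i915 scheduler guarantees.
     */
    static void defer_request(struct request *rq, struct list_head *pl)
    {
            LIST_HEAD(list);

            do {
                    int i;

                    list_move_tail(&rq->link, pl);
                    for (i = 0; i < rq->nr_waiters; i++)
                            list_move_tail(&rq->waiters[i]->link, &list);

                    /* next deferred request, in the order we collected them */
                    rq = list.next == &list ? NULL : to_request(list.next);
            } while (rq);
    }

    int main(void)
    {
            struct request a = { .name = "a" }, b = { .name = "b" };
            struct request c = { .name = "c" }, d = { .name = "d" };
            struct list_head *pos;
            LIST_HEAD(pending);

            a.waiters[a.nr_waiters++] = &b; /* b waits on a */
            b.waiters[b.nr_waiters++] = &c; /* c waits on b */

            list_add_tail(&a.link, &pending);
            list_add_tail(&b.link, &pending);
            list_add_tail(&c.link, &pending);
            list_add_tail(&d.link, &pending); /* independent request */

            defer_request(&a, &pending);

            /* prints "d a b c": a and its waiters move behind d, in order */
            for (pos = pending.next; pos != &pending; pos = pos->next)
                    printf("%s ", to_request(pos)->name);
            printf("\n");
            return 0;
    }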

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_lrc.c

index 28685ba91a2c74ca4632ef1eb4c5d0058d09d84c..22afd2616d7ff90821c44322991b05b1412539e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -833,10 +833,9 @@ last_active(const struct intel_engine_execlists *execlists)
        return *last;
 }
 
-static void
-defer_request(struct i915_request * const rq, struct list_head * const pl)
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
-       struct i915_dependency *p;
+       LIST_HEAD(list);
 
        /*
         * We want to move the interrupted request to the back of
@@ -845,34 +844,37 @@ defer_request(struct i915_request * const rq, struct list_head * const pl)
         * flight and were waiting for the interrupted request to
         * be run after it again.
         */
-       list_move_tail(&rq->sched.link, pl);
+       do {
+               struct i915_dependency *p;
 
-       list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
-               struct i915_request *w =
-                       container_of(p->waiter, typeof(*w), sched);
+               GEM_BUG_ON(i915_request_is_active(rq));
+               list_move_tail(&rq->sched.link, pl);
 
-               /* Leave semaphores spinning on the other engines */
-               if (w->engine != rq->engine)
-                       continue;
+               list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+                       struct i915_request *w =
+                               container_of(p->waiter, typeof(*w), sched);
 
-               /* No waiter should start before the active request completed */
-               GEM_BUG_ON(i915_request_started(w));
+                       /* Leave semaphores spinning on the other engines */
+                       if (w->engine != rq->engine)
+                               continue;
 
-               GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
-               if (rq_prio(w) < rq_prio(rq))
-                       continue;
+                       /* No waiter should start before its signaler */
+                       GEM_BUG_ON(i915_request_started(w) &&
+                                  !i915_request_completed(rq));
 
-               if (list_empty(&w->sched.link))
-                       continue; /* Not yet submitted; unready */
+                       GEM_BUG_ON(i915_request_is_active(w));
+                       if (list_empty(&w->sched.link))
+                               continue; /* Not yet submitted; unready */
 
-               /*
-                * This should be very shallow as it is limited by the
-                * number of requests that can fit in a ring (<64) and
-                * the number of contexts that can be in flight on this
-                * engine.
-                */
-               defer_request(w, pl);
-       }
+                       if (rq_prio(w) < rq_prio(rq))
+                               continue;
+
+                       GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+                       list_move_tail(&w->sched.link, &list);
+               }
+
+               rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+       } while (rq);
 }
 
 static void defer_active(struct intel_engine_cs *engine)