drm/i915: Wrap engine->schedule in RCU locks for set-wedge protection
author Chris Wilson <chris@chris-wilson.co.uk>
Wed, 7 Mar 2018 13:42:25 +0000 (13:42 +0000)
committer Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Fri, 16 Mar 2018 12:35:31 +0000 (14:35 +0200)
Similar to the staging around handling of engine->submit_request, we
need to stop adding to the execlists->queue prior to calling
engine->cancel_requests. cancel_requests will move requests from the
queue onto the timeline, so if we add a request onto the queue after that
point, it will be lost.
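
In practice this turns the scheduler hook into an RCU-protected callback: set_wedged() clears engine->schedule (and swaps submit_request) before waiting out a grace period, while every caller now samples the pointer inside an RCU read-side section. The following is a minimal, self-contained sketch of that pattern, not the actual i915 code; the struct and function names here are illustrative only.

    #include <linux/rcupdate.h>

    struct request;

    struct engine {
    	/* Optional scheduler hook; cleared once the device is wedged. */
    	void (*schedule)(struct request *rq, int prio);
    };

    /* Reader side: sample and call the hook inside an RCU read section. */
    static void submit_with_priority(struct engine *engine,
    				 struct request *rq, int prio)
    {
    	rcu_read_lock();
    	if (engine->schedule)
    		engine->schedule(rq, prio);
    	rcu_read_unlock();
    }

    /* Writer side: clear the hook, then wait for all readers to drain. */
    static void wedge_engine(struct engine *engine)
    {
    	engine->schedule = NULL;

    	/*
    	 * Once synchronize_rcu() returns, no CPU can still be executing
    	 * the old callback, so the queues can be cancelled without a new
    	 * request slipping in behind cancel_requests.
    	 */
    	synchronize_rcu();
    }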

Fixes: af7a8ffad9c5 ("drm/i915: Use rcu instead of stop_machine in set_wedged")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307134226.25492-5-chris@chris-wilson.co.uk
(cherry picked from commit 47650db02dd52267953df81438c93cf8a0eb0e5e)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_request.c

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a5bd07338b46d00a2522ce34fb63a485f0a8e37f..8d913d833ab93427178e6fad95da6ac8d44d3882 100644
@@ -471,10 +471,11 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 
        rq = to_request(fence);
        engine = rq->engine;
-       if (!engine->schedule)
-               return;
 
-       engine->schedule(rq, prio);
+       rcu_read_lock();
+       if (engine->schedule)
+               engine->schedule(rq, prio);
+       rcu_read_unlock();
 }
 
 static void fence_set_priority(struct dma_fence *fence, int prio)
@@ -3214,8 +3215,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
         */
        for_each_engine(engine, i915, id) {
                i915_gem_reset_prepare_engine(engine);
+
                engine->submit_request = nop_submit_request;
+               engine->schedule = NULL;
        }
+       i915->caps.scheduler = 0;
 
        /*
         * Make sure no one is running the old callback before we proceed with
@@ -3233,11 +3237,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
                 * start to complete all requests.
                 */
                engine->submit_request = nop_complete_submit_request;
-               engine->schedule = NULL;
        }
 
-       i915->caps.scheduler = 0;
-
        /*
         * Make sure no request can slip through without getting completed by
         * either this call here to intel_engine_init_global_seqno, or the one
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index d437beac3969f815c7e8c8430cd7c590d2bba2a6..282f57630cc15a174d536d0f3adcd38b425ddf25 100644
@@ -1081,8 +1081,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
         * decide whether to preempt the entire chain so that it is ready to
         * run at the earliest possible convenience.
         */
+       rcu_read_lock();
        if (engine->schedule)
                engine->schedule(request, request->ctx->priority);
+       rcu_read_unlock();
 
        local_bh_disable();
        i915_sw_fence_commit(&request->submit);