asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/i915: Grab execlist spinlock to avoid post-reset concurrency issues.
author: Tomas Elf <tomas.elf@intel.com>
Mon, 19 Oct 2015 15:32:32 +0000 (16:32 +0100)
committer: Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 22 Oct 2015 17:34:38 +0000 (19:34 +0200)
Grab execlist lock when cleaning up execlist queues after GPU reset to avoid
concurrency problems between the context event interrupt handler and the reset
path immediately following a GPU reset.

* v2 (Chris Wilson):
Do execlist check and use simpler form of spinlock functions.

Signed-off-by: Tomas Elf <tomas.elf@intel.com>
Reviewed-by: Dave Gordon <david.s.gordon@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c

index d0fa5481543c141e0949a527f7d25180b9d17f96..9b2048c7077d1cbd2f545e2d46b34b8874ce395f 100644 (file)
@@ -2753,18 +2753,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
         * are the ones that keep the context and ringbuffer backing objects
         * pinned in place.
         */
-       while (!list_empty(&ring->execlist_queue)) {
-               struct drm_i915_gem_request *submit_req;
 
-               submit_req = list_first_entry(&ring->execlist_queue,
-                               struct drm_i915_gem_request,
-                               execlist_link);
-               list_del(&submit_req->execlist_link);
+       if (i915.enable_execlists) {
+               spin_lock_irq(&ring->execlist_lock);
+               while (!list_empty(&ring->execlist_queue)) {
+                       struct drm_i915_gem_request *submit_req;
 
-               if (submit_req->ctx != ring->default_context)
-                       intel_lr_context_unpin(submit_req);
+                       submit_req = list_first_entry(&ring->execlist_queue,
+                                       struct drm_i915_gem_request,
+                                       execlist_link);
+                       list_del(&submit_req->execlist_link);
 
-               i915_gem_request_unreference(submit_req);
+                       if (submit_req->ctx != ring->default_context)
+                               intel_lr_context_unpin(submit_req);
+
+                       i915_gem_request_unreference(submit_req);
+               }
+               spin_unlock_irq(&ring->execlist_lock);
        }
 
        /*