diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 7ef1d37970f6d4942d3ec2d1028f2e4d9467a8f1..24c99d0838af6e23e7eb6295139b852d02ec538c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -99,6 +99,9 @@ static bool add_retire(struct intel_engine_cs *engine,
 void intel_engine_add_retire(struct intel_engine_cs *engine,
                             struct intel_timeline *tl)
 {
+       /* We don't deal well with the engine disappearing beneath us */
+       GEM_BUG_ON(intel_engine_is_virtual(engine));
+
        if (add_retire(engine, tl))
                schedule_work(&engine->retire_work);
 }
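
Note on the hunk above: the added GEM_BUG_ON() asserts a caller precondition rather than handling the failure case. As the new comment says, the retire path does not cope with the engine disappearing beneath it, so callers must not hand it a virtual engine. A minimal user-space sketch of the same assert-the-precondition pattern follows; struct engine, engine_is_virtual() and engine_add_retire() here are illustrative stand-ins, not the i915 API.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins, not the i915 structures. */
struct engine {
        bool is_virtual;
        const char *name;
};

static bool engine_is_virtual(const struct engine *e)
{
        return e->is_virtual;
}

static void engine_add_retire(struct engine *e)
{
        /*
         * Same idea as the GEM_BUG_ON() above: the deferred retire work
         * keeps a bare pointer to the engine, so the caller must pass an
         * engine whose lifetime is stable, not one that can disappear
         * beneath the worker.
         */
        assert(!engine_is_virtual(e));
        printf("queueing retire work on %s\n", e->name);
}

int main(void)
{
        struct engine rcs0 = { .is_virtual = false, .name = "rcs0" };

        engine_add_retire(&rcs0);
        return 0;
}
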
@@ -144,24 +147,32 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
                        fence = i915_active_fence_get(&tl->last_request);
                        if (fence) {
+                               mutex_unlock(&tl->mutex);
+
                                timeout = dma_fence_wait_timeout(fence,
                                                                 interruptible,
                                                                 timeout);
                                dma_fence_put(fence);
+
+                               /* Retirement is best effort */
+                               if (!mutex_trylock(&tl->mutex)) {
+                                       active_count++;
+                                       goto out_active;
+                               }
                        }
                }
 
                if (!retire_requests(tl) || flush_submission(gt))
                        active_count++;
+               mutex_unlock(&tl->mutex);
 
-               spin_lock(&timelines->lock);
+out_active:    spin_lock(&timelines->lock);
 
-               /* Resume iteration after dropping lock */
+               /* Resume list iteration after reacquiring spinlock */
                list_safe_reset_next(tl, tn, link);
                if (atomic_dec_and_test(&tl->active_count))
                        list_del(&tl->link);
 
-               mutex_unlock(&tl->mutex);
 
                /* Defer the final release to after the spinlock */
                if (refcount_dec_and_test(&tl->kref.refcount)) {
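
Note on the hunk above (shown truncated): the per-timeline mutex is now released before the potentially long dma_fence_wait_timeout() and reacquired only with mutex_trylock(); if that fails, retirement is skipped for this pass, the timeline is counted as still active, and iteration resumes under the timelines spinlock at the new out_active label. Below is a rough user-space pthreads sketch of that unlock/wait/trylock shape; struct timeline, wait_for_last_request() and the simplified retire_requests() are invented for illustration and stand in for the i915 helpers.

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct timeline {
        pthread_mutex_t mutex;
        bool has_pending_request;
};

/* Stand-in for dma_fence_wait_timeout(): pretend to wait for the request. */
static long wait_for_last_request(struct timeline *tl, long timeout)
{
        (void)tl;
        usleep(1000);
        return timeout;
}

/* Stand-in for retire_requests(): report whether everything retired. */
static bool retire_requests(struct timeline *tl)
{
        tl->has_pending_request = false;
        return true;
}

/*
 * Called with tl->mutex held, mirroring the loop body in
 * intel_gt_retire_requests_timeout(). Returns the remaining timeout and
 * bumps *active for timelines that could not be retired on this pass.
 */
static long retire_one_timeline(struct timeline *tl, long timeout,
                                unsigned long *active)
{
        if (timeout > 0 && tl->has_pending_request) {
                /* Do not hold the mutex across a potentially long wait. */
                pthread_mutex_unlock(&tl->mutex);

                timeout = wait_for_last_request(tl, timeout);

                /*
                 * Retirement is best effort: if the mutex is contended by
                 * the time the wait finishes, report the timeline as busy
                 * and let a later pass retry instead of blocking.
                 */
                if (pthread_mutex_trylock(&tl->mutex) != 0) {
                        (*active)++;
                        return timeout;
                }
        }

        if (!retire_requests(tl))
                (*active)++;
        pthread_mutex_unlock(&tl->mutex);

        return timeout;
}

int main(void)
{
        struct timeline tl = {
                .mutex = PTHREAD_MUTEX_INITIALIZER,
                .has_pending_request = true,
        };
        unsigned long active = 0;

        pthread_mutex_lock(&tl.mutex);
        retire_one_timeline(&tl, 1000, &active);
        return active ? 1 : 0;
}

Handing the mutex back before the wait is the point of the change: the retire pass never blocks other users of the timeline while it sleeps on a fence, at the cost of occasionally reporting a timeline as active when it merely lost the trylock race.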