asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/i915: Merge wait_for_timelines with retire_request
author: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 4 Oct 2019 13:40:04 +0000 (14:40 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 4 Oct 2019 14:39:23 +0000 (15:39 +0100)
wait_for_timelines is essentially the same loop as retiring requests
(with an extra timeout), so merge the two into one routine.

v2: i915_retire_requests_timeout and keep VT'd w/a as !interruptible

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-10-chris@chris-wilson.co.uk
14 files changed:
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_pm.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/selftests/igt_flush_test.c
drivers/gpu/drm/i915/selftests/igt_live_test.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c

index c19431d609fcab1e872572fbd73ce268c47c95e9..418d0d2b5fa9386927871dce63b50469a952e64a 100644 (file)
@@ -432,9 +432,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
 
        /* Attempt to reap some mmap space from dead objects */
        do {
-               err = i915_gem_wait_for_idle(i915,
-                                            I915_WAIT_INTERRUPTIBLE,
-                                            MAX_SCHEDULE_TIMEOUT);
+               err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
                if (err)
                        break;
 
index 26f325bbfe4d216e41deb34a3f65c14edd945363..90b211257f2da2c52f1c1030180114e09f5bdd21 100644 (file)
@@ -59,9 +59,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
 {
        bool result = !intel_gt_is_wedged(gt);
 
-       if (i915_gem_wait_for_idle(gt->i915,
-                                  I915_WAIT_FOR_IDLE_BOOST,
-                                  I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+       if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                /* XXX hide warning from gem_eio */
                if (i915_modparams.reset) {
                        dev_err(gt->i915->drm.dev,
index f5402aad9b5a08e9d8d7a78d1c0818d6f3238758..f902aeee175542acb7c77fd2468f55e991d70fa6 100644 (file)
@@ -1137,7 +1137,7 @@ __sseu_finish(const char *name,
 
        if ((flags & TEST_IDLE) && ret == 0) {
                ret = i915_gem_wait_for_idle(ce->engine->i915,
-                                            0, MAX_SCHEDULE_TIMEOUT);
+                                            MAX_SCHEDULE_TIMEOUT);
                if (ret)
                        return ret;
 
index d4cefdd3843139b87f20ae1c258bb53a15c09fe1..bdb34f03ec477ced0f894ec200a5c12ab535af4b 100644 (file)
@@ -196,8 +196,7 @@ int intel_gt_resume(struct intel_gt *gt)
 
 static void wait_for_idle(struct intel_gt *gt)
 {
-       if (i915_gem_wait_for_idle(gt->i915, 0,
-                                  I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+       if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                /*
                 * Forcibly cancel outstanding work and leave
                 * the gpu quiet.
index 7c4bba21adcd635979f465fb4c9c73cc1075d1ec..5888a658e2b77fa799d08c8bec732e903c37561d 100644 (file)
@@ -3635,9 +3635,7 @@ i915_drop_caches_set(void *data, u64 val)
                i915_retire_requests(i915);
 
        if (val & (DROP_IDLE | DROP_ACTIVE)) {
-               ret = i915_gem_wait_for_idle(i915,
-                                            I915_WAIT_INTERRUPTIBLE,
-                                            MAX_SCHEDULE_TIMEOUT);
+               ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
                if (ret)
                        return ret;
        }
index ad31852e430923c5feabdaa3b94e1978a236aeab..44f3463ff9f177baeb6a01ecbfd726d8b3fd7598 100644 (file)
@@ -2321,8 +2321,7 @@ void i915_gem_driver_register(struct drm_i915_private *i915);
 void i915_gem_driver_unregister(struct drm_i915_private *i915);
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
 void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                          unsigned int flags, long timeout);
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
index 83c2c7bf1e3496a87d41c1e4ea5320d91cdc59fa..12d21e5cf3edecb6df52b7f718a6de1f0d9739b9 100644 (file)
@@ -883,61 +883,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
        }
 }
 
-static long
-wait_for_timelines(struct intel_gt *gt, unsigned int wait, long timeout)
-{
-       struct intel_gt_timelines *timelines = &gt->timelines;
-       struct intel_timeline *tl;
-       unsigned long flags;
-
-       spin_lock_irqsave(&timelines->lock, flags);
-       list_for_each_entry(tl, &timelines->active_list, link) {
-               struct dma_fence *fence;
-
-               fence = i915_active_fence_get(&tl->last_request);
-               if (!fence)
-                       continue;
-
-               spin_unlock_irqrestore(&timelines->lock, flags);
-
-               if (!dma_fence_is_i915(fence)) {
-                       timeout = dma_fence_wait_timeout(fence,
-                                                        flags & I915_WAIT_INTERRUPTIBLE,
-                                                        timeout);
-               } else {
-                       struct i915_request *rq = to_request(fence);
-
-                       /*
-                        * "Race-to-idle".
-                        *
-                        * Switching to the kernel context is often used as
-                        * a synchronous step prior to idling, e.g. in suspend
-                        * for flushing all current operations to memory before
-                        * sleeping. These we want to complete as quickly as
-                        * possible to avoid prolonged stalls, so allow the gpu
-                        * to boost to maximum clocks.
-                        */
-                       if (flags & I915_WAIT_FOR_IDLE_BOOST)
-                               gen6_rps_boost(rq);
-
-                       timeout = i915_request_wait(rq, flags, timeout);
-               }
-
-               dma_fence_put(fence);
-               if (timeout < 0)
-                       return timeout;
-
-               /* restart after reacquiring the lock */
-               spin_lock_irqsave(&timelines->lock, flags);
-               tl = list_entry(&timelines->active_list, typeof(*tl), link);
-       }
-       spin_unlock_irqrestore(&timelines->lock, flags);
-
-       return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
-                          unsigned int flags, long timeout)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
 {
        struct intel_gt *gt = &i915->gt;
 
@@ -945,18 +891,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
        if (!intel_gt_pm_is_awake(gt))
                return 0;
 
-       do {
-               timeout = wait_for_timelines(gt, flags, timeout);
-               if (timeout < 0)
-                       return timeout;
-
+       while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
                cond_resched();
                if (signal_pending(current))
                        return -EINTR;
+       }
 
-       } while (i915_retire_requests(i915));
-
-       return 0;
+       return timeout;
 }
 
 struct i915_vma *
index 0552bf93eea3c49b21dc521bd38de187cc898833..0a412f6d01d73c1a4459b75d08ba1dce30eabe37 100644 (file)
@@ -46,9 +46,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
         * the hopes that we can then remove contexts and the like only
         * bound by their active reference.
         */
-       return i915_gem_wait_for_idle(i915,
-                                     I915_WAIT_INTERRUPTIBLE,
-                                     MAX_SCHEDULE_TIMEOUT);
+       return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 }
 
 static bool
@@ -126,6 +124,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
                                    min_size, alignment, color,
                                    start, end, mode);
 
+       i915_retire_requests(vm->i915);
+
 search_again:
        active = NULL;
        INIT_LIST_HEAD(&eviction_list);
@@ -264,13 +264,13 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 
        trace_i915_gem_evict_node(vm, target, flags);
 
-       /* Retire before we search the active list. Although we have
+       /*
+        * Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
         * a stray pin (preventing eviction) that can only be resolved by
         * retiring.
         */
-       if (!(flags & PIN_NONBLOCK))
-               i915_retire_requests(vm->i915);
+       i915_retire_requests(vm->i915);
 
        if (i915_vm_has_cache_coloring(vm)) {
                /* Expand search to cover neighbouring guard pages (or lack!) */
index 7462d87f7a48c716948b4f6926c655fdb0735f10..082fcf9085a6ba5e4cd5454ef94b60085294035c 100644 (file)
@@ -2528,7 +2528,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+               /* XXX This does not prevent more requests being submitted! */
+               if (i915_retire_requests_timeout(dev_priv,
+                                                -MAX_SCHEDULE_TIMEOUT)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
index 4ffe62a42186422a0160b243b39f1689d99d1693..52f7c4e5b6441672d46e4c662206504b6f81927d 100644 (file)
@@ -1508,13 +1508,19 @@ long i915_request_wait(struct i915_request *rq,
        return timeout;
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915)
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout)
 {
        struct intel_gt_timelines *timelines = &i915->gt.timelines;
        struct intel_timeline *tl, *tn;
+       unsigned long active_count = 0;
        unsigned long flags;
+       bool interruptible;
        LIST_HEAD(free);
 
+       interruptible = true;
+       if (timeout < 0)
+               timeout = -timeout, interruptible = false;
+
        spin_lock_irqsave(&timelines->lock, flags);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                if (!mutex_trylock(&tl->mutex))
@@ -1525,13 +1531,27 @@ bool i915_retire_requests(struct drm_i915_private *i915)
                tl->active_count++; /* pin the list element */
                spin_unlock_irqrestore(&timelines->lock, flags);
 
+               if (timeout > 0) {
+                       struct dma_fence *fence;
+
+                       fence = i915_active_fence_get(&tl->last_request);
+                       if (fence) {
+                               timeout = dma_fence_wait_timeout(fence,
+                                                                interruptible,
+                                                                timeout);
+                               dma_fence_put(fence);
+                       }
+               }
+
                retire_requests(tl);
 
                spin_lock_irqsave(&timelines->lock, flags);
 
                /* Resume iteration after dropping lock */
                list_safe_reset_next(tl, tn, link);
-               if (!--tl->active_count)
+               if (--tl->active_count)
+                       active_count += !!rcu_access_pointer(tl->last_request.fence);
+               else
                        list_del(&tl->link);
 
                mutex_unlock(&tl->mutex);
@@ -1547,7 +1567,7 @@ bool i915_retire_requests(struct drm_i915_private *i915)
        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);
 
-       return !list_empty(&timelines->active_list);
+       return active_count ? timeout : 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
index 621fb33cda30d0120453c60c21aec9bb129b40c6..256b0715180f4a18d6b6fe642807d3eb442a0bf6 100644 (file)
@@ -310,7 +310,6 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_INTERRUPTIBLE        BIT(0)
 #define I915_WAIT_PRIORITY     BIT(1) /* small priority bump for the request */
 #define I915_WAIT_ALL          BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
 static inline bool i915_request_signaled(const struct i915_request *rq)
 {
@@ -460,6 +459,10 @@ i915_request_active_timeline(struct i915_request *rq)
                                         lockdep_is_held(&rq->engine->active.lock));
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915);
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);
+static inline void i915_retire_requests(struct drm_i915_private *i915)
+{
+       i915_retire_requests_timeout(i915, 0);
+}
 
 #endif /* I915_REQUEST_H */
index 2a5fbe46ea9f5f580048e5d8d36623e3e08b9c60..ed496bd6d84fa41c0fd42d6eb44b174cae406f9c 100644 (file)
@@ -18,8 +18,7 @@ int igt_flush_test(struct drm_i915_private *i915)
 
        cond_resched();
 
-       i915_retire_requests(i915);
-       if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
+       if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) {
                pr_err("%pS timed out, cancelling all further testing.\n",
                       __builtin_return_address(0));
 
@@ -30,7 +29,6 @@ int igt_flush_test(struct drm_i915_private *i915)
                intel_gt_set_wedged(&i915->gt);
                ret = -EIO;
        }
-       i915_retire_requests(i915);
 
        return ret;
 }
index 04a6f88fdf64555de6ae7e3796f4c09e093f8757..eae90f97df6c72c259272953ad1a2e47980cd666 100644 (file)
@@ -23,9 +23,7 @@ int igt_live_test_begin(struct igt_live_test *t,
        t->func = func;
        t->name = name;
 
-       err = i915_gem_wait_for_idle(i915,
-                                    I915_WAIT_INTERRUPTIBLE,
-                                    MAX_SCHEDULE_TIMEOUT);
+       err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
        if (err) {
                pr_err("%s(%s): failed to idle before, with err=%d!",
                       func, name, err);
index a8be5da2b3cf946564933486fe0ceaa539152415..3b589bbb2c2d195b7c798301d90dc698375b5a24 100644 (file)
@@ -44,7 +44,7 @@ void mock_device_flush(struct drm_i915_private *i915)
        do {
                for_each_engine(engine, i915, id)
                        mock_engine_flush(engine);
-       } while (i915_retire_requests(i915));
+       } while (i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT));
 }
 
 static void mock_device_release(struct drm_device *dev)