drm/i915: Move list of timelines under its own lock
author Chris Wilson <chris@chris-wilson.co.uk>
Mon, 28 Jan 2019 10:23:56 +0000 (10:23 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Mon, 28 Jan 2019 16:24:22 +0000 (16:24 +0000)
Currently, the list of timelines is serialised by the struct_mutex, but
to alleviate difficulties with using that mutex in future, move the
list management under its own dedicated mutex.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190128102356.15037-5-chris@chris-wilson.co.uk
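
The core of the change is visible in the first hunk below (i915_drv.h):
the bare struct list_head gains a wrapper that pairs it with the mutex
guarding it, and all list manipulation moves under that lock. A minimal,
self-contained sketch of the pattern (the demo_* names are illustrative
stand-ins, not the driver's; the real structure and helpers are in the
hunks that follow):

#include <linux/list.h>
#include <linux/mutex.h>

/* Keep the list and the lock that protects it in one place. */
struct demo_timelines {
        struct mutex mutex;     /* protects list */
        struct list_head list;
};

struct demo_timeline {
        struct list_head link;  /* guarded by demo_timelines.mutex */
};

static void demo_timelines_init(struct demo_timelines *gt)
{
        mutex_init(&gt->mutex);
        INIT_LIST_HEAD(&gt->list);
}

static void demo_timeline_add(struct demo_timelines *gt,
                              struct demo_timeline *tl)
{
        mutex_lock(&gt->mutex);
        list_add(&tl->link, &gt->list);
        mutex_unlock(&gt->mutex);
}

static void demo_timeline_del(struct demo_timelines *gt,
                              struct demo_timeline *tl)
{
        mutex_lock(&gt->mutex);
        list_del(&tl->link);
        mutex_unlock(&gt->mutex);
}
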
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_reset.c
drivers/gpu/drm/i915/i915_timeline.c
drivers/gpu/drm/i915/i915_timeline.h
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/selftests/mock_timeline.c

index 0133d1da3d3c4d3b4f6b13c6657db6ba9ac6b621..8a181b4551976cf600614e4af9b1de2e347e990a 100644 (file)
@@ -1975,7 +1975,10 @@ struct drm_i915_private {
                void (*resume)(struct drm_i915_private *);
                void (*cleanup_engine)(struct intel_engine_cs *engine);
 
-               struct list_head timelines;
+               struct i915_gt_timelines {
+                       struct mutex mutex; /* protects list, tainted by GPU */
+                       struct list_head list;
+               } timelines;
 
                struct list_head active_rings;
                struct list_head closed_vma;
index 15acd052da46f9e5b782854a2b82f3b18815d995..761714448ff344f27afd48d16ef7bfa1c2d7595c 100644 (file)
@@ -3222,33 +3222,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        return ret;
 }
 
-static long wait_for_timeline(struct i915_timeline *tl,
-                             unsigned int flags, long timeout)
-{
-       struct i915_request *rq;
-
-       rq = i915_gem_active_get_unlocked(&tl->last_request);
-       if (!rq)
-               return timeout;
-
-       /*
-        * "Race-to-idle".
-        *
-        * Switching to the kernel context is often used as a synchronous
-        * step prior to idling, e.g. in suspend for flushing all
-        * current operations to memory before sleeping. These we
-        * want to complete as quickly as possible to avoid prolonged
-        * stalls, so allow the gpu to boost to maximum clocks.
-        */
-       if (flags & I915_WAIT_FOR_IDLE_BOOST)
-               gen6_rps_boost(rq, NULL);
-
-       timeout = i915_request_wait(rq, flags, timeout);
-       i915_request_put(rq);
-
-       return timeout;
-}
-
 static int wait_for_engines(struct drm_i915_private *i915)
 {
        if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
@@ -3262,6 +3235,52 @@ static int wait_for_engines(struct drm_i915_private *i915)
        return 0;
 }
 
+static long
+wait_for_timelines(struct drm_i915_private *i915,
+                  unsigned int flags, long timeout)
+{
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
+       struct i915_timeline *tl;
+
+       if (!READ_ONCE(i915->gt.active_requests))
+               return timeout;
+
+       mutex_lock(&gt->mutex);
+       list_for_each_entry(tl, &gt->list, link) {
+               struct i915_request *rq;
+
+               rq = i915_gem_active_get_unlocked(&tl->last_request);
+               if (!rq)
+                       continue;
+
+               mutex_unlock(&gt->mutex);
+
+               /*
+                * "Race-to-idle".
+                *
+                * Switching to the kernel context is often used as a synchronous
+                * step prior to idling, e.g. in suspend for flushing all
+                * current operations to memory before sleeping. These we
+                * want to complete as quickly as possible to avoid prolonged
+                * stalls, so allow the gpu to boost to maximum clocks.
+                */
+               if (flags & I915_WAIT_FOR_IDLE_BOOST)
+                       gen6_rps_boost(rq, NULL);
+
+               timeout = i915_request_wait(rq, flags, timeout);
+               i915_request_put(rq);
+               if (timeout < 0)
+                       return timeout;
+
+               /* restart after reacquiring the lock */
+               mutex_lock(&gt->mutex);
+               tl = list_entry(&gt->list, typeof(*tl), link);
+       }
+       mutex_unlock(&gt->mutex);
+
+       return timeout;
+}
+
 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
                           unsigned int flags, long timeout)
 {
@@ -3273,17 +3292,15 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
        if (!READ_ONCE(i915->gt.awake))
                return 0;
 
+       timeout = wait_for_timelines(i915, flags, timeout);
+       if (timeout < 0)
+               return timeout;
+
        if (flags & I915_WAIT_LOCKED) {
-               struct i915_timeline *tl;
                int err;
 
                lockdep_assert_held(&i915->drm.struct_mutex);
 
-               list_for_each_entry(tl, &i915->gt.timelines, link) {
-                       timeout = wait_for_timeline(tl, flags, timeout);
-                       if (timeout < 0)
-                               return timeout;
-               }
                if (GEM_SHOW_DEBUG() && !timeout) {
                        /* Presume that timeout was non-zero to begin with! */
                        dev_warn(&i915->drm.pdev->dev,
@@ -3297,17 +3314,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 
                i915_retire_requests(i915);
                GEM_BUG_ON(i915->gt.active_requests);
-       } else {
-               struct intel_engine_cs *engine;
-               enum intel_engine_id id;
-
-               for_each_engine(engine, i915, id) {
-                       struct i915_timeline *tl = &engine->timeline;
-
-                       timeout = wait_for_timeline(tl, flags, timeout);
-                       if (timeout < 0)
-                               return timeout;
-               }
        }
 
        return 0;
@@ -5008,6 +5014,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
                dev_priv->gt.cleanup_engine = intel_engine_cleanup;
        }
 
+       i915_timelines_init(dev_priv);
+
        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
                return ret;
@@ -5130,8 +5138,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 err_uc_misc:
        intel_uc_fini_misc(dev_priv);
 
-       if (ret != -EIO)
+       if (ret != -EIO) {
                i915_gem_cleanup_userptr(dev_priv);
+               i915_timelines_fini(dev_priv);
+       }
 
        if (ret == -EIO) {
                mutex_lock(&dev_priv->drm.struct_mutex);
@@ -5182,6 +5192,7 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 
        intel_uc_fini_misc(dev_priv);
        i915_gem_cleanup_userptr(dev_priv);
+       i915_timelines_fini(dev_priv);
 
        i915_gem_drain_freed_objects(dev_priv);
 
@@ -5284,7 +5295,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
        if (!dev_priv->priorities)
                goto err_dependencies;
 
-       INIT_LIST_HEAD(&dev_priv->gt.timelines);
        INIT_LIST_HEAD(&dev_priv->gt.active_rings);
        INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
 
@@ -5328,7 +5338,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
        GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
        GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
        WARN_ON(dev_priv->mm.object_count);
-       WARN_ON(!list_empty(&dev_priv->gt.timelines));
 
        kmem_cache_destroy(dev_priv->priorities);
        kmem_cache_destroy(dev_priv->dependencies);
index 99bd3bc336b30437d6e31d3f7ecb953c9ba8ce2e..d2dca85a543d48ddd4044bfc3dba4bc2a6a9245f 100644 (file)
@@ -854,7 +854,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
         *
         * No more can be submitted until we reset the wedged bit.
         */
-       list_for_each_entry(tl, &i915->gt.timelines, link) {
+       mutex_lock(&i915->gt.timelines.mutex);
+       list_for_each_entry(tl, &i915->gt.timelines.list, link) {
                struct i915_request *rq;
                long timeout;
 
@@ -876,9 +877,12 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
                timeout = dma_fence_default_wait(&rq->fence, true,
                                                 MAX_SCHEDULE_TIMEOUT);
                i915_request_put(rq);
-               if (timeout < 0)
+               if (timeout < 0) {
+                       mutex_unlock(&i915->gt.timelines.mutex);
                        goto unlock;
+               }
        }
+       mutex_unlock(&i915->gt.timelines.mutex);
 
        intel_engines_sanitize(i915, false);
 
index 4667cc08c416c16b8c9f8efbfb723d5064e75457..84550f17d3dffd9a5266f7d5b3e9f7b58e26500b 100644 (file)
@@ -13,7 +13,7 @@ void i915_timeline_init(struct drm_i915_private *i915,
                        struct i915_timeline *timeline,
                        const char *name)
 {
-       lockdep_assert_held(&i915->drm.struct_mutex);
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
 
        /*
         * Ideally we want a set of engines on a single leaf as we expect
@@ -23,9 +23,12 @@ void i915_timeline_init(struct drm_i915_private *i915,
         */
        BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
 
+       timeline->i915 = i915;
        timeline->name = name;
 
-       list_add(&timeline->link, &i915->gt.timelines);
+       mutex_lock(&gt->mutex);
+       list_add(&timeline->link, &gt->list);
+       mutex_unlock(&gt->mutex);
 
        /* Called during early_init before we know how many engines there are */
 
@@ -39,6 +42,17 @@ void i915_timeline_init(struct drm_i915_private *i915,
        i915_syncmap_init(&timeline->sync);
 }
 
+void i915_timelines_init(struct drm_i915_private *i915)
+{
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
+
+       mutex_init(&gt->mutex);
+       INIT_LIST_HEAD(&gt->list);
+
+       /* via i915_gem_wait_for_idle() */
+       i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
+}
+
 /**
  * i915_timelines_park - called when the driver idles
  * @i915: the drm_i915_private device
@@ -51,11 +65,11 @@ void i915_timeline_init(struct drm_i915_private *i915,
  */
 void i915_timelines_park(struct drm_i915_private *i915)
 {
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
        struct i915_timeline *timeline;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
-
-       list_for_each_entry(timeline, &i915->gt.timelines, link) {
+       mutex_lock(&gt->mutex);
+       list_for_each_entry(timeline, &gt->list, link) {
                /*
                 * All known fences are completed so we can scrap
                 * the current sync point tracking and start afresh,
@@ -64,15 +78,20 @@ void i915_timelines_park(struct drm_i915_private *i915)
                 */
                i915_syncmap_free(&timeline->sync);
        }
+       mutex_unlock(&gt->mutex);
 }
 
 void i915_timeline_fini(struct i915_timeline *timeline)
 {
+       struct i915_gt_timelines *gt = &timeline->i915->gt.timelines;
+
        GEM_BUG_ON(!list_empty(&timeline->requests));
 
        i915_syncmap_free(&timeline->sync);
 
+       mutex_lock(&gt->mutex);
        list_del(&timeline->link);
+       mutex_unlock(&gt->mutex);
 }
 
 struct i915_timeline *
@@ -99,6 +118,15 @@ void __i915_timeline_free(struct kref *kref)
        kfree(timeline);
 }
 
+void i915_timelines_fini(struct drm_i915_private *i915)
+{
+       struct i915_gt_timelines *gt = &i915->gt.timelines;
+
+       GEM_BUG_ON(!list_empty(&gt->list));
+
+       mutex_destroy(&gt->mutex);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_timeline.c"
 #include "selftests/i915_timeline.c"
index 38c1e15e927a82297f5f9eb43bebed85eccf062b..87ad2dd31c202fd9a3a2653dc7e2067b8d86cdc2 100644 (file)
@@ -66,6 +66,7 @@ struct i915_timeline {
 
        struct list_head link;
        const char *name;
+       struct drm_i915_private *i915;
 
        struct kref kref;
 };
@@ -134,6 +135,8 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
        return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
 }
 
+void i915_timelines_init(struct drm_i915_private *i915);
 void i915_timelines_park(struct drm_i915_private *i915);
+void i915_timelines_fini(struct drm_i915_private *i915);
 
 #endif
index 8ab5a2688a0cdd0120989e78078780fc502ce7a1..14ae46fda49f1c816fa2d5bb3e88a847eddcd57d 100644 (file)
@@ -68,13 +68,14 @@ static void mock_device_release(struct drm_device *dev)
        i915_gem_contexts_fini(i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
+       i915_timelines_fini(i915);
+
        drain_workqueue(i915->wq);
        i915_gem_drain_freed_objects(i915);
 
        mutex_lock(&i915->drm.struct_mutex);
        mock_fini_ggtt(&i915->ggtt);
        mutex_unlock(&i915->drm.struct_mutex);
-       WARN_ON(!list_empty(&i915->gt.timelines));
 
        destroy_workqueue(i915->wq);
 
@@ -226,7 +227,8 @@ struct drm_i915_private *mock_gem_device(void)
        if (!i915->priorities)
                goto err_dependencies;
 
-       INIT_LIST_HEAD(&i915->gt.timelines);
+       i915_timelines_init(i915);
+
        INIT_LIST_HEAD(&i915->gt.active_rings);
        INIT_LIST_HEAD(&i915->gt.closed_vma);
 
@@ -253,6 +255,7 @@ struct drm_i915_private *mock_gem_device(void)
        i915_gem_contexts_fini(i915);
 err_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
+       i915_timelines_fini(i915);
        kmem_cache_destroy(i915->priorities);
 err_dependencies:
        kmem_cache_destroy(i915->dependencies);
index dcf3b16f5a07c3780c6f19efdad99a5eb55b3aa2..cf39ccd9fc05e5b198b09077bd58aea646ffa52b 100644 (file)
@@ -10,6 +10,7 @@
 
 void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 {
+       timeline->i915 = NULL;
        timeline->fence_context = context;
 
        spin_lock_init(&timeline->lock);
@@ -24,5 +25,5 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 
 void mock_timeline_fini(struct i915_timeline *timeline)
 {
-       i915_timeline_fini(timeline);
+       i915_syncmap_free(&timeline->sync);
 }