asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/gpu/drm/i915/gem/i915_gem_context.c
drm/i915: Fix up the inverse mapping for default ctx->engines[]
[linux.git] / drivers / gpu / drm / i915 / gem / i915_gem_context.c
index 628673d1d7f85d1a2982586e5c8f7e4d0403e5d8..b407baaf0014237af5dfcfdb607522b15ea6d5af 100644 (file)
@@ -70,6 +70,7 @@
 #include <drm/i915_drm.h>
 
 #include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_user.h"
 
 #include "i915_gem_context.h"
 #include "i915_globals.h"
@@ -158,7 +159,7 @@ lookup_user_engine(struct i915_gem_context *ctx,
                if (!engine)
                        return ERR_PTR(-EINVAL);
 
-               idx = engine->id;
+               idx = engine->legacy_idx;
        } else {
                idx = ci->engine_instance;
        }
@@ -278,6 +279,7 @@ static void free_engines_rcu(struct rcu_head *rcu)
 
 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 {
+       const struct intel_gt *gt = &ctx->i915->gt;
        struct intel_engine_cs *engine;
        struct i915_gem_engines *e;
        enum intel_engine_id id;
@@ -287,7 +289,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
                return ERR_PTR(-ENOMEM);
 
        init_rcu_head(&e->rcu);
-       for_each_engine(engine, ctx->i915, id) {
+       for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
                ce = intel_context_create(ctx, engine);
@@ -297,8 +299,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
                }
 
                e->engines[id] = ce;
+               e->num_engines = id + 1;
        }
-       e->num_engines = id;
 
        return e;
 }
@@ -397,30 +399,6 @@ static void context_close(struct i915_gem_context *ctx)
        i915_gem_context_put(ctx);
 }
 
-static u32 default_desc_template(const struct drm_i915_private *i915,
-                                const struct i915_address_space *vm)
-{
-       u32 address_mode;
-       u32 desc;
-
-       desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
-
-       address_mode = INTEL_LEGACY_32B_CONTEXT;
-       if (vm && i915_vm_is_4lvl(vm))
-               address_mode = INTEL_LEGACY_64B_CONTEXT;
-       desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
-       if (IS_GEN(i915, 8))
-               desc |= GEN8_CTX_L3LLC_COHERENT;
-
-       /* TODO: WaDisableLiteRestore when we start using semaphore
-        * signalling between Command Streamers
-        * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
-        */
-
-       return desc;
-}
-
 static struct i915_gem_context *
 __create_context(struct drm_i915_private *i915)
 {
@@ -459,8 +437,6 @@ __create_context(struct drm_i915_private *i915)
        i915_gem_context_set_recoverable(ctx);
 
        ctx->ring_size = 4 * PAGE_SIZE;
-       ctx->desc_template =
-               default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm);
 
        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -476,9 +452,18 @@ static struct i915_address_space *
 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
        struct i915_address_space *old = ctx->vm;
+       struct i915_gem_engines_iter it;
+       struct intel_context *ce;
+
+       GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
 
        ctx->vm = i915_vm_get(vm);
-       ctx->desc_template = default_desc_template(ctx->i915, vm);
+
+       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+               i915_vm_put(ce->vm);
+               ce->vm = i915_vm_get(vm);
+       }
+       i915_gem_context_unlock_engines(ctx);
 
        return old;
 }
@@ -644,20 +629,13 @@ static void init_contexts(struct drm_i915_private *i915)
        init_llist_head(&i915->contexts.free_list);
 }
 
-static bool needs_preempt_context(struct drm_i915_private *i915)
-{
-       return USES_GUC_SUBMISSION(i915);
-}
-
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
        struct i915_gem_context *ctx;
 
        /* Reassure ourselves we are only called once */
        GEM_BUG_ON(dev_priv->kernel_context);
-       GEM_BUG_ON(dev_priv->preempt_context);
 
-       intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
        init_contexts(dev_priv);
 
        /* lowest priority; idle task */
@@ -677,15 +655,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
        dev_priv->kernel_context = ctx;
 
-       /* highest priority; preempting task */
-       if (needs_preempt_context(dev_priv)) {
-               ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
-               if (!IS_ERR(ctx))
-                       dev_priv->preempt_context = ctx;
-               else
-                       DRM_ERROR("Failed to create preempt context; disabling preemption\n");
-       }
-
        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         DRIVER_CAPS(dev_priv)->has_logical_contexts ?
                         "logical" : "fake");
@@ -696,8 +665,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
        lockdep_assert_held(&i915->drm.struct_mutex);
 
-       if (i915->preempt_context)
-               destroy_kernel_context(&i915->preempt_context);
        destroy_kernel_context(&i915->kernel_context);
 
        /* Must free all deferred contexts (via flush_workqueue) first */
@@ -923,8 +890,12 @@ static int context_barrier_task(struct i915_gem_context *ctx,
        if (!cb)
                return -ENOMEM;
 
-       i915_active_init(i915, &cb->base, cb_retire);
-       i915_active_acquire(&cb->base);
+       i915_active_init(i915, &cb->base, NULL, cb_retire);
+       err = i915_active_acquire(&cb->base);
+       if (err) {
+               kfree(cb);
+               return err;
+       }
 
        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                struct i915_request *rq;
@@ -1019,7 +990,7 @@ static void set_ppgtt_barrier(void *data)
 
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
-       struct i915_address_space *vm = rq->gem_context->vm;
+       struct i915_address_space *vm = rq->hw_context->vm;
        struct intel_engine_cs *engine = rq->engine;
        u32 base = engine->mmio_base;
        u32 *cs;
@@ -1128,9 +1099,8 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
                                   set_ppgtt_barrier,
                                   old);
        if (err) {
-               ctx->vm = old;
-               ctx->desc_template = default_desc_template(ctx->i915, old);
-               i915_vm_put(vm);
+               i915_vm_put(__set_ppgtt(ctx, old));
+               i915_vm_put(old);
        }
 
 unlock:
@@ -1187,26 +1157,11 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
-       /* Queue this switch after all other activity by this context. */
-       ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
-       if (ret)
-               goto out_add;
-
-       /*
-        * Guarantee context image and the timeline remains pinned until the
-        * modifying request is retired by setting the ce activity tracker.
-        *
-        * But we only need to take one pin on the account of it. Or in other
-        * words transfer the pinned ce object to tracked active request.
-        */
-       GEM_BUG_ON(i915_active_is_idle(&ce->active));
-       ret = i915_active_ref(&ce->active, rq->fence.context, rq);
-       if (ret)
-               goto out_add;
-
-       ret = gen8_emit_rpcs_config(rq, ce, sseu);
+       /* Serialise with the remote context */
+       ret = intel_context_prepare_remote_request(ce, rq);
+       if (ret == 0)
+               ret = gen8_emit_rpcs_config(rq, ce, sseu);
 
-out_add:
        i915_request_add(rq);
        return ret;
 }
@@ -1217,7 +1172,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
 {
        int ret;
 
-       GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
+       GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
 
        ret = intel_context_lock_pinned(ce);
        if (ret)
@@ -1239,7 +1194,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
 static int
 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
 {
-       struct drm_i915_private *i915 = ce->gem_context->i915;
+       struct drm_i915_private *i915 = ce->engine->i915;
        int ret;
 
        ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
@@ -1776,7 +1731,7 @@ get_engines(struct i915_gem_context *ctx,
 
                if (e->engines[n]) {
                        ci.engine_class = e->engines[n]->engine->uabi_class;
-                       ci.engine_instance = e->engines[n]->engine->instance;
+                       ci.engine_instance = e->engines[n]->engine->uabi_instance;
                }
 
                if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
@@ -2141,7 +2096,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
                return -EINVAL;
 
-       ret = i915_terminally_wedged(i915);
+       ret = intel_gt_terminally_wedged(&i915->gt);
        if (ret)
                return ret;
 
@@ -2287,8 +2242,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                args->size = 0;
                if (ctx->vm)
                        args->value = ctx->vm->total;
-               else if (to_i915(dev)->mm.aliasing_ppgtt)
-                       args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+               else if (to_i915(dev)->ggtt.alias)
+                       args->value = to_i915(dev)->ggtt.alias->vm.total;
                else
                        args->value = to_i915(dev)->ggtt.vm.total;
                break;