asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/gpu/drm/i915/gem/i915_gem_context.c
drm/i915: Fix up the inverse mapping for default ctx->engines[]
[linux.git] / drivers / gpu / drm / i915 / gem / i915_gem_context.c
index 08721ef62e4e134417c0c8dc20ce985b6be6e5a0..b407baaf0014237af5dfcfdb607522b15ea6d5af 100644 (file)
@@ -70,6 +70,7 @@
 #include <drm/i915_drm.h>
 
 #include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_user.h"
 
 #include "i915_gem_context.h"
 #include "i915_globals.h"
@@ -95,24 +96,45 @@ void i915_lut_handle_free(struct i915_lut_handle *lut)
 
 static void lut_close(struct i915_gem_context *ctx)
 {
-       struct i915_lut_handle *lut, *ln;
        struct radix_tree_iter iter;
        void __rcu **slot;
 
-       list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
-               list_del(&lut->obj_link);
-               i915_lut_handle_free(lut);
-       }
-       INIT_LIST_HEAD(&ctx->handles_list);
+       lockdep_assert_held(&ctx->mutex);
 
        rcu_read_lock();
        radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
                struct i915_vma *vma = rcu_dereference_raw(*slot);
+               struct drm_i915_gem_object *obj = vma->obj;
+               struct i915_lut_handle *lut;
+
+               if (!kref_get_unless_zero(&obj->base.refcount))
+                       continue;
 
-               radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+               rcu_read_unlock();
+               i915_gem_object_lock(obj);
+               list_for_each_entry(lut, &obj->lut_list, obj_link) {
+                       if (lut->ctx != ctx)
+                               continue;
 
-               vma->open_count--;
-               i915_vma_put(vma);
+                       if (lut->handle != iter.index)
+                               continue;
+
+                       list_del(&lut->obj_link);
+                       break;
+               }
+               i915_gem_object_unlock(obj);
+               rcu_read_lock();
+
+               if (&lut->obj_link != &obj->lut_list) {
+                       i915_lut_handle_free(lut);
+                       radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+                       if (atomic_dec_and_test(&vma->open_count) &&
+                           !i915_vma_is_ggtt(vma))
+                               i915_vma_close(vma);
+                       i915_gem_object_put(obj);
+               }
+
+               i915_gem_object_put(obj);
        }
        rcu_read_unlock();
 }
@@ -137,7 +159,7 @@ lookup_user_engine(struct i915_gem_context *ctx,
                if (!engine)
                        return ERR_PTR(-EINVAL);
 
-               idx = engine->id;
+               idx = engine->legacy_idx;
        } else {
                idx = ci->engine_instance;
        }
@@ -250,19 +272,14 @@ static void free_engines(struct i915_gem_engines *e)
        __free_engines(e, e->num_engines);
 }
 
-static void free_engines_rcu(struct work_struct *wrk)
+static void free_engines_rcu(struct rcu_head *rcu)
 {
-       struct i915_gem_engines *e =
-               container_of(wrk, struct i915_gem_engines, rcu.work);
-       struct drm_i915_private *i915 = e->i915;
-
-       mutex_lock(&i915->drm.struct_mutex);
-       free_engines(e);
-       mutex_unlock(&i915->drm.struct_mutex);
+       free_engines(container_of(rcu, struct i915_gem_engines, rcu));
 }
 
 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 {
+       const struct intel_gt *gt = &ctx->i915->gt;
        struct intel_engine_cs *engine;
        struct i915_gem_engines *e;
        enum intel_engine_id id;
@@ -271,8 +288,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
        if (!e)
                return ERR_PTR(-ENOMEM);
 
-       e->i915 = ctx->i915;
-       for_each_engine(engine, ctx->i915, id) {
+       init_rcu_head(&e->rcu);
+       for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
                ce = intel_context_create(ctx, engine);
@@ -282,8 +299,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
                }
 
                e->engines[id] = ce;
+               e->num_engines = id + 1;
        }
-       e->num_engines = id;
 
        return e;
 }
@@ -294,13 +311,14 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
        release_hw_id(ctx);
-       i915_ppgtt_put(ctx->ppgtt);
+       if (ctx->vm)
+               i915_vm_put(ctx->vm);
 
        free_engines(rcu_access_pointer(ctx->engines));
        mutex_destroy(&ctx->engines_mutex);
 
        if (ctx->timeline)
-               i915_timeline_put(ctx->timeline);
+               intel_timeline_put(ctx->timeline);
 
        kfree(ctx->name);
        put_pid(ctx->pid);
@@ -359,7 +377,10 @@ void i915_gem_context_release(struct kref *ref)
 
 static void context_close(struct i915_gem_context *ctx)
 {
+       mutex_lock(&ctx->mutex);
+
        i915_gem_context_set_closed(ctx);
+       ctx->file_priv = ERR_PTR(-EBADF);
 
        /*
         * This context will never again be assinged to HW, so we can
@@ -374,36 +395,12 @@ static void context_close(struct i915_gem_context *ctx)
         */
        lut_close(ctx);
 
-       ctx->file_priv = ERR_PTR(-EBADF);
+       mutex_unlock(&ctx->mutex);
        i915_gem_context_put(ctx);
 }
 
-static u32 default_desc_template(const struct drm_i915_private *i915,
-                                const struct i915_hw_ppgtt *ppgtt)
-{
-       u32 address_mode;
-       u32 desc;
-
-       desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
-
-       address_mode = INTEL_LEGACY_32B_CONTEXT;
-       if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
-               address_mode = INTEL_LEGACY_64B_CONTEXT;
-       desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
-       if (IS_GEN(i915, 8))
-               desc |= GEN8_CTX_L3LLC_COHERENT;
-
-       /* TODO: WaDisableLiteRestore when we start using semaphore
-        * signalling between Command Streamers
-        * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
-        */
-
-       return desc;
-}
-
 static struct i915_gem_context *
-__create_context(struct drm_i915_private *dev_priv)
+__create_context(struct drm_i915_private *i915)
 {
        struct i915_gem_context *ctx;
        struct i915_gem_engines *e;
@@ -415,8 +412,8 @@ __create_context(struct drm_i915_private *dev_priv)
                return ERR_PTR(-ENOMEM);
 
        kref_init(&ctx->ref);
-       list_add_tail(&ctx->link, &dev_priv->contexts.list);
-       ctx->i915 = dev_priv;
+       list_add_tail(&ctx->link, &i915->contexts.list);
+       ctx->i915 = i915;
        ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
        mutex_init(&ctx->mutex);
 
@@ -429,20 +426,17 @@ __create_context(struct drm_i915_private *dev_priv)
        RCU_INIT_POINTER(ctx->engines, e);
 
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-       INIT_LIST_HEAD(&ctx->handles_list);
        INIT_LIST_HEAD(&ctx->hw_id_link);
 
        /* NB: Mark all slices as needing a remap so that when the context first
         * loads it will restore whatever remap state already exists. If there
         * is no remap info, it will be a NOP. */
-       ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+       ctx->remap_slice = ALL_L3_SLICES(i915);
 
        i915_gem_context_set_bannable(ctx);
        i915_gem_context_set_recoverable(ctx);
 
        ctx->ring_size = 4 * PAGE_SIZE;
-       ctx->desc_template =
-               default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
 
        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -454,26 +448,35 @@ __create_context(struct drm_i915_private *dev_priv)
        return ERR_PTR(err);
 }
 
-static struct i915_hw_ppgtt *
-__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
+static struct i915_address_space *
+__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
-       struct i915_hw_ppgtt *old = ctx->ppgtt;
+       struct i915_address_space *old = ctx->vm;
+       struct i915_gem_engines_iter it;
+       struct intel_context *ce;
 
-       ctx->ppgtt = i915_ppgtt_get(ppgtt);
-       ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
+       GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
+
+       ctx->vm = i915_vm_get(vm);
+
+       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+               i915_vm_put(ce->vm);
+               ce->vm = i915_vm_get(vm);
+       }
+       i915_gem_context_unlock_engines(ctx);
 
        return old;
 }
 
 static void __assign_ppgtt(struct i915_gem_context *ctx,
-                          struct i915_hw_ppgtt *ppgtt)
+                          struct i915_address_space *vm)
 {
-       if (ppgtt == ctx->ppgtt)
+       if (vm == ctx->vm)
                return;
 
-       ppgtt = __set_ppgtt(ctx, ppgtt);
-       if (ppgtt)
-               i915_ppgtt_put(ppgtt);
+       vm = __set_ppgtt(ctx, vm);
+       if (vm)
+               i915_vm_put(vm);
 }
 
 static struct i915_gem_context *
@@ -495,7 +498,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
                return ctx;
 
        if (HAS_FULL_PPGTT(dev_priv)) {
-               struct i915_hw_ppgtt *ppgtt;
+               struct i915_ppgtt *ppgtt;
 
                ppgtt = i915_ppgtt_create(dev_priv);
                if (IS_ERR(ppgtt)) {
@@ -505,14 +508,14 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
                        return ERR_CAST(ppgtt);
                }
 
-               __assign_ppgtt(ctx, ppgtt);
-               i915_ppgtt_put(ppgtt);
+               __assign_ppgtt(ctx, &ppgtt->vm);
+               i915_vm_put(&ppgtt->vm);
        }
 
        if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
-               struct i915_timeline *timeline;
+               struct intel_timeline *timeline;
 
-               timeline = i915_timeline_create(dev_priv, NULL);
+               timeline = intel_timeline_create(&dev_priv->gt, NULL);
                if (IS_ERR(timeline)) {
                        context_close(ctx);
                        return ERR_CAST(timeline);
@@ -626,20 +629,13 @@ static void init_contexts(struct drm_i915_private *i915)
        init_llist_head(&i915->contexts.free_list);
 }
 
-static bool needs_preempt_context(struct drm_i915_private *i915)
-{
-       return HAS_EXECLISTS(i915);
-}
-
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
        struct i915_gem_context *ctx;
 
        /* Reassure ourselves we are only called once */
        GEM_BUG_ON(dev_priv->kernel_context);
-       GEM_BUG_ON(dev_priv->preempt_context);
 
-       intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
        init_contexts(dev_priv);
 
        /* lowest priority; idle task */
@@ -659,38 +655,16 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
        dev_priv->kernel_context = ctx;
 
-       /* highest priority; preempting task */
-       if (needs_preempt_context(dev_priv)) {
-               ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
-               if (!IS_ERR(ctx))
-                       dev_priv->preempt_context = ctx;
-               else
-                       DRM_ERROR("Failed to create preempt context; disabling preemption\n");
-       }
-
        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         DRIVER_CAPS(dev_priv)->has_logical_contexts ?
                         "logical" : "fake");
        return 0;
 }
 
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-       for_each_engine(engine, dev_priv, id)
-               intel_engine_lost_context(engine);
-}
-
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
        lockdep_assert_held(&i915->drm.struct_mutex);
 
-       if (i915->preempt_context)
-               destroy_kernel_context(&i915->preempt_context);
        destroy_kernel_context(&i915->kernel_context);
 
        /* Must free all deferred contexts (via flush_workqueue) first */
@@ -706,7 +680,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 
 static int vm_idr_cleanup(int id, void *p, void *data)
 {
-       i915_ppgtt_put(p);
+       i915_vm_put(p);
        return 0;
 }
 
@@ -716,8 +690,8 @@ static int gem_context_register(struct i915_gem_context *ctx,
        int ret;
 
        ctx->file_priv = fpriv;
-       if (ctx->ppgtt)
-               ctx->ppgtt->vm.file = fpriv;
+       if (ctx->vm)
+               ctx->vm->file = fpriv;
 
        ctx->pid = get_task_pid(current, PIDTYPE_PID);
        ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
@@ -772,9 +746,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
        return 0;
 
 err_ctx:
-       mutex_lock(&i915->drm.struct_mutex);
        context_close(ctx);
-       mutex_unlock(&i915->drm.struct_mutex);
 err:
        idr_destroy(&file_priv->vm_idr);
        idr_destroy(&file_priv->context_idr);
@@ -787,8 +759,6 @@ void i915_gem_context_close(struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
-       lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
-
        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
        idr_destroy(&file_priv->context_idr);
        mutex_destroy(&file_priv->context_idr_lock);
@@ -804,7 +774,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_vm_control *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct i915_hw_ppgtt *ppgtt;
+       struct i915_ppgtt *ppgtt;
        int err;
 
        if (!HAS_FULL_PPGTT(i915))
@@ -831,7 +801,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
        if (err)
                goto err_put;
 
-       err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+       err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
        if (err < 0)
                goto err_unlock;
 
@@ -845,7 +815,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
 err_unlock:
        mutex_unlock(&file_priv->vm_idr_lock);
 err_put:
-       i915_ppgtt_put(ppgtt);
+       i915_vm_put(&ppgtt->vm);
        return err;
 }
 
@@ -854,7 +824,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_vm_control *args = data;
-       struct i915_hw_ppgtt *ppgtt;
+       struct i915_address_space *vm;
        int err;
        u32 id;
 
@@ -872,13 +842,13 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
        if (err)
                return err;
 
-       ppgtt = idr_remove(&file_priv->vm_idr, id);
+       vm = idr_remove(&file_priv->vm_idr, id);
 
        mutex_unlock(&file_priv->vm_idr_lock);
-       if (!ppgtt)
+       if (!vm)
                return -ENOENT;
 
-       i915_ppgtt_put(ppgtt);
+       i915_vm_put(vm);
        return 0;
 }
 
@@ -902,6 +872,7 @@ static void cb_retire(struct i915_active *base)
 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
                                intel_engine_mask_t engines,
+                               bool (*skip)(struct intel_context *ce, void *data),
                                int (*emit)(struct i915_request *rq, void *data),
                                void (*task)(void *data),
                                void *data)
@@ -919,8 +890,12 @@ static int context_barrier_task(struct i915_gem_context *ctx,
        if (!cb)
                return -ENOMEM;
 
-       i915_active_init(i915, &cb->base, cb_retire);
-       i915_active_acquire(&cb->base);
+       i915_active_init(i915, &cb->base, NULL, cb_retire);
+       err = i915_active_acquire(&cb->base);
+       if (err) {
+               kfree(cb);
+               return err;
+       }
 
        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                struct i915_request *rq;
@@ -931,7 +906,10 @@ static int context_barrier_task(struct i915_gem_context *ctx,
                        break;
                }
 
-               if (!(ce->engine->mask & engines) || !ce->state)
+               if (!(ce->engine->mask & engines))
+                       continue;
+
+               if (skip && skip(ce, data))
                        continue;
 
                rq = intel_context_create_request(ce);
@@ -964,10 +942,10 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
                     struct i915_gem_context *ctx,
                     struct drm_i915_gem_context_param *args)
 {
-       struct i915_hw_ppgtt *ppgtt;
+       struct i915_address_space *vm;
        int ret;
 
-       if (!ctx->ppgtt)
+       if (!ctx->vm)
                return -ENODEV;
 
        /* XXX rcu acquire? */
@@ -975,19 +953,19 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
        if (ret)
                return ret;
 
-       ppgtt = i915_ppgtt_get(ctx->ppgtt);
+       vm = i915_vm_get(ctx->vm);
        mutex_unlock(&ctx->i915->drm.struct_mutex);
 
        ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
        if (ret)
                goto err_put;
 
-       ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+       ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
        GEM_BUG_ON(!ret);
        if (ret < 0)
                goto err_unlock;
 
-       i915_ppgtt_get(ppgtt);
+       i915_vm_get(vm);
 
        args->size = 0;
        args->value = ret;
@@ -996,30 +974,31 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
 err_unlock:
        mutex_unlock(&file_priv->vm_idr_lock);
 err_put:
-       i915_ppgtt_put(ppgtt);
+       i915_vm_put(vm);
        return ret;
 }
 
 static void set_ppgtt_barrier(void *data)
 {
-       struct i915_hw_ppgtt *old = data;
+       struct i915_address_space *old = data;
 
-       if (INTEL_GEN(old->vm.i915) < 8)
-               gen6_ppgtt_unpin_all(old);
+       if (INTEL_GEN(old->i915) < 8)
+               gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
 
-       i915_ppgtt_put(old);
+       i915_vm_put(old);
 }
 
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
-       struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
+       struct i915_address_space *vm = rq->hw_context->vm;
        struct intel_engine_cs *engine = rq->engine;
        u32 base = engine->mmio_base;
        u32 *cs;
        int i;
 
-       if (i915_vm_is_4lvl(&ppgtt->vm)) {
-               const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
+       if (i915_vm_is_4lvl(vm)) {
+               struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+               const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
 
                cs = intel_ring_begin(rq, 6);
                if (IS_ERR(cs))
@@ -1035,6 +1014,8 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
                *cs++ = MI_NOOP;
                intel_ring_advance(rq, cs);
        } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
+               struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
                cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);
@@ -1052,23 +1033,31 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
                intel_ring_advance(rq, cs);
        } else {
                /* ppGTT is not part of the legacy context image */
-               gen6_ppgtt_pin(ppgtt);
+               gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
        }
 
        return 0;
 }
 
+static bool skip_ppgtt_update(struct intel_context *ce, void *data)
+{
+       if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
+               return !ce->state;
+       else
+               return !atomic_read(&ce->pin_count);
+}
+
 static int set_ppgtt(struct drm_i915_file_private *file_priv,
                     struct i915_gem_context *ctx,
                     struct drm_i915_gem_context_param *args)
 {
-       struct i915_hw_ppgtt *ppgtt, *old;
+       struct i915_address_space *vm, *old;
        int err;
 
        if (args->size)
                return -EINVAL;
 
-       if (!ctx->ppgtt)
+       if (!ctx->vm)
                return -ENODEV;
 
        if (upper_32_bits(args->value))
@@ -1078,24 +1067,26 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
        if (err)
                return err;
 
-       ppgtt = idr_find(&file_priv->vm_idr, args->value);
-       if (ppgtt)
-               i915_ppgtt_get(ppgtt);
+       vm = idr_find(&file_priv->vm_idr, args->value);
+       if (vm)
+               i915_vm_get(vm);
        mutex_unlock(&file_priv->vm_idr_lock);
-       if (!ppgtt)
+       if (!vm)
                return -ENOENT;
 
        err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
        if (err)
                goto out;
 
-       if (ppgtt == ctx->ppgtt)
+       if (vm == ctx->vm)
                goto unlock;
 
        /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
+       mutex_lock(&ctx->mutex);
        lut_close(ctx);
+       mutex_unlock(&ctx->mutex);
 
-       old = __set_ppgtt(ctx, ppgtt);
+       old = __set_ppgtt(ctx, vm);
 
        /*
         * We need to flush any requests using the current ppgtt before
@@ -1103,20 +1094,20 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
         * only indirectly through the context.
         */
        err = context_barrier_task(ctx, ALL_ENGINES,
+                                  skip_ppgtt_update,
                                   emit_ppgtt_update,
                                   set_ppgtt_barrier,
                                   old);
        if (err) {
-               ctx->ppgtt = old;
-               ctx->desc_template = default_desc_template(ctx->i915, old);
-               i915_ppgtt_put(ppgtt);
+               i915_vm_put(__set_ppgtt(ctx, old));
+               i915_vm_put(old);
        }
 
 unlock:
        mutex_unlock(&ctx->i915->drm.struct_mutex);
 
 out:
-       i915_ppgtt_put(ppgtt);
+       i915_vm_put(vm);
        return err;
 }
 
@@ -1166,27 +1157,11 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
-       /* Queue this switch after all other activity by this context. */
-       ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
-       if (ret)
-               goto out_add;
+       /* Serialise with the remote context */
+       ret = intel_context_prepare_remote_request(ce, rq);
+       if (ret == 0)
+               ret = gen8_emit_rpcs_config(rq, ce, sseu);
 
-       ret = gen8_emit_rpcs_config(rq, ce, sseu);
-       if (ret)
-               goto out_add;
-
-       /*
-        * Guarantee context image and the timeline remains pinned until the
-        * modifying request is retired by setting the ce activity tracker.
-        *
-        * But we only need to take one pin on the account of it. Or in other
-        * words transfer the pinned ce object to tracked active request.
-        */
-       if (!i915_active_request_isset(&ce->active_tracker))
-               __intel_context_pin(ce);
-       __i915_active_request_set(&ce->active_tracker, rq);
-
-out_add:
        i915_request_add(rq);
        return ret;
 }
@@ -1197,7 +1172,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
 {
        int ret;
 
-       GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
+       GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
 
        ret = intel_context_lock_pinned(ce);
        if (ret)
@@ -1219,7 +1194,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
 static int
 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
 {
-       struct drm_i915_private *i915 = ce->gem_context->i915;
+       struct drm_i915_private *i915 = ce->engine->i915;
        int ret;
 
        ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
@@ -1612,7 +1587,7 @@ set_engines(struct i915_gem_context *ctx,
        if (!set.engines)
                return -ENOMEM;
 
-       set.engines->i915 = ctx->i915;
+       init_rcu_head(&set.engines->rcu);
        for (n = 0; n < num_engines; n++) {
                struct i915_engine_class_instance ci;
                struct intel_engine_cs *engine;
@@ -1666,8 +1641,7 @@ set_engines(struct i915_gem_context *ctx,
        rcu_swap_protected(ctx->engines, set.engines, 1);
        mutex_unlock(&ctx->engines_mutex);
 
-       INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu);
-       queue_rcu_work(system_wq, &set.engines->rcu);
+       call_rcu(&set.engines->rcu, free_engines_rcu);
 
        return 0;
 }
@@ -1682,7 +1656,7 @@ __copy_engines(struct i915_gem_engines *e)
        if (!copy)
                return ERR_PTR(-ENOMEM);
 
-       copy->i915 = e->i915;
+       init_rcu_head(&copy->rcu);
        for (n = 0; n < e->num_engines; n++) {
                if (e->engines[n])
                        copy->engines[n] = intel_context_get(e->engines[n]);
@@ -1757,7 +1731,7 @@ get_engines(struct i915_gem_context *ctx,
 
                if (e->engines[n]) {
                        ci.engine_class = e->engines[n]->engine->uabi_class;
-                       ci.engine_instance = e->engines[n]->engine->instance;
+                       ci.engine_instance = e->engines[n]->engine->uabi_instance;
                }
 
                if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
@@ -1769,8 +1743,7 @@ get_engines(struct i915_gem_context *ctx,
        args->size = size;
 
 err_free:
-       INIT_RCU_WORK(&e->rcu, free_engines_rcu);
-       queue_rcu_work(system_wq, &e->rcu);
+       free_engines(e);
        return err;
 }
 
@@ -1891,7 +1864,7 @@ static int clone_engines(struct i915_gem_context *dst,
        if (!clone)
                goto err_unlock;
 
-       clone->i915 = dst->i915;
+       init_rcu_head(&clone->rcu);
        for (n = 0; n < e->num_engines; n++) {
                struct intel_engine_cs *engine;
 
@@ -1997,8 +1970,8 @@ static int clone_timeline(struct i915_gem_context *dst,
                GEM_BUG_ON(src->timeline == dst->timeline);
 
                if (dst->timeline)
-                       i915_timeline_put(dst->timeline);
-               dst->timeline = i915_timeline_get(src->timeline);
+                       intel_timeline_put(dst->timeline);
+               dst->timeline = intel_timeline_get(src->timeline);
        }
 
        return 0;
@@ -2007,15 +1980,15 @@ static int clone_timeline(struct i915_gem_context *dst,
 static int clone_vm(struct i915_gem_context *dst,
                    struct i915_gem_context *src)
 {
-       struct i915_hw_ppgtt *ppgtt;
+       struct i915_address_space *vm;
 
        rcu_read_lock();
        do {
-               ppgtt = READ_ONCE(src->ppgtt);
-               if (!ppgtt)
+               vm = READ_ONCE(src->vm);
+               if (!vm)
                        break;
 
-               if (!kref_get_unless_zero(&ppgtt->ref))
+               if (!kref_get_unless_zero(&vm->ref))
                        continue;
 
                /*
@@ -2033,16 +2006,16 @@ static int clone_vm(struct i915_gem_context *dst,
                 * it cannot be reallocated elsewhere.
                 */
 
-               if (ppgtt == READ_ONCE(src->ppgtt))
+               if (vm == READ_ONCE(src->vm))
                        break;
 
-               i915_ppgtt_put(ppgtt);
+               i915_vm_put(vm);
        } while (1);
        rcu_read_unlock();
 
-       if (ppgtt) {
-               __assign_ppgtt(dst, ppgtt);
-               i915_ppgtt_put(ppgtt);
+       if (vm) {
+               __assign_ppgtt(dst, vm);
+               i915_vm_put(vm);
        }
 
        return 0;
@@ -2123,7 +2096,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
                return -EINVAL;
 
-       ret = i915_terminally_wedged(i915);
+       ret = intel_gt_terminally_wedged(&i915->gt);
        if (ret)
                return ret;
 
@@ -2163,9 +2136,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        return 0;
 
 err_ctx:
-       mutex_lock(&dev->struct_mutex);
        context_close(ext_data.ctx);
-       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 
@@ -2190,10 +2161,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
        if (!ctx)
                return -ENOENT;
 
-       mutex_lock(&dev->struct_mutex);
        context_close(ctx);
-       mutex_unlock(&dev->struct_mutex);
-
        return 0;
 }
 
@@ -2272,10 +2240,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 
        case I915_CONTEXT_PARAM_GTT_SIZE:
                args->size = 0;
-               if (ctx->ppgtt)
-                       args->value = ctx->ppgtt->vm.total;
-               else if (to_i915(dev)->mm.aliasing_ppgtt)
-                       args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+               if (ctx->vm)
+                       args->value = ctx->vm->total;
+               else if (to_i915(dev)->ggtt.alias)
+                       args->value = to_i915(dev)->ggtt.alias->vm.total;
                else
                        args->value = to_i915(dev)->ggtt.vm.total;
                break;