drm/i915: Fix up the inverse mapping for default ctx->engines[]
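
For orientation before the diff: the subject refers to keeping the default
ctx->engines[] slots and the per-engine legacy_idx in agreement, so that a
lookup by the user-visible class/instance pair lands back on the same slot
that default_engines() populated. Below is a minimal standalone sketch of
that idea; the types, names and values (fake_engine, default_slots, the
class numbers) are illustrative only and are not the driver's real
structures.

        /*
         * Standalone illustration (not i915 code): the default engines[]
         * array is laid out in legacy order, and each engine remembers its
         * own slot (legacy_idx) so that a (class, instance) lookup maps
         * back to the slot default_engines() filled.
         */
        #include <stddef.h>
        #include <stdio.h>

        struct fake_engine {
                unsigned int class;      /* user-visible engine class */
                unsigned int instance;   /* user-visible engine instance */
                unsigned int legacy_idx; /* slot in the default engines[] array */
        };

        static struct fake_engine engines[] = {
                { .class = 0, .instance = 0, .legacy_idx = 0 }, /* e.g. rcs0 */
                { .class = 1, .instance = 0, .legacy_idx = 1 }, /* e.g. bcs0 */
                { .class = 2, .instance = 0, .legacy_idx = 2 }, /* e.g. vcs0 */
        };

        /* Forward map: fill slot engine->legacy_idx, as default_engines() does. */
        static struct fake_engine *default_slots[8];

        static void build_default_slots(void)
        {
                size_t i;

                for (i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
                        default_slots[engines[i].legacy_idx] = &engines[i];
        }

        /* Inverse map: recover the slot from the user-visible (class, instance). */
        static int lookup_legacy_idx(unsigned int class, unsigned int instance)
        {
                size_t i;

                for (i = 0; i < sizeof(engines) / sizeof(engines[0]); i++) {
                        if (engines[i].class == class &&
                            engines[i].instance == instance)
                                return engines[i].legacy_idx;
                }

                return -1; /* the real lookup returns -EINVAL */
        }

        int main(void)
        {
                int idx;

                build_default_slots();

                /* (class 2, instance 0) must resolve to the slot it was stored in. */
                idx = lookup_legacy_idx(2, 0);
                if (idx < 0)
                        return 1;

                printf("slot %d -> class %u instance %u\n",
                       idx, default_slots[idx]->class, default_slots[idx]->instance);
                return 0;
        }
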
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dd9aa77e38ae5f97f728812edc7a3a3c59f634d0..b407baaf0014237af5dfcfdb607522b15ea6d5af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -70,6 +70,7 @@
 #include <drm/i915_drm.h>
 
 #include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_user.h"
 
 #include "i915_gem_context.h"
 #include "i915_globals.h"
@@ -158,7 +159,7 @@ lookup_user_engine(struct i915_gem_context *ctx,
                if (!engine)
                        return ERR_PTR(-EINVAL);
 
-               idx = engine->id;
+               idx = engine->legacy_idx;
        } else {
                idx = ci->engine_instance;
        }
@@ -278,6 +279,7 @@ static void free_engines_rcu(struct rcu_head *rcu)
 
 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 {
+       const struct intel_gt *gt = &ctx->i915->gt;
        struct intel_engine_cs *engine;
        struct i915_gem_engines *e;
        enum intel_engine_id id;
@@ -287,7 +289,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
                return ERR_PTR(-ENOMEM);
 
        init_rcu_head(&e->rcu);
-       for_each_engine(engine, ctx->i915, id) {
+       for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
                ce = intel_context_create(ctx, engine);
@@ -297,8 +299,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
                }
 
                e->engines[id] = ce;
+               e->num_engines = id + 1;
        }
-       e->num_engines = id;
 
        return e;
 }
@@ -309,13 +311,14 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
        release_hw_id(ctx);
-       i915_ppgtt_put(ctx->ppgtt);
+       if (ctx->vm)
+               i915_vm_put(ctx->vm);
 
        free_engines(rcu_access_pointer(ctx->engines));
        mutex_destroy(&ctx->engines_mutex);
 
        if (ctx->timeline)
-               i915_timeline_put(ctx->timeline);
+               intel_timeline_put(ctx->timeline);
 
        kfree(ctx->name);
        put_pid(ctx->pid);
@@ -396,32 +399,8 @@ static void context_close(struct i915_gem_context *ctx)
        i915_gem_context_put(ctx);
 }
 
-static u32 default_desc_template(const struct drm_i915_private *i915,
-                                const struct i915_hw_ppgtt *ppgtt)
-{
-       u32 address_mode;
-       u32 desc;
-
-       desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
-
-       address_mode = INTEL_LEGACY_32B_CONTEXT;
-       if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
-               address_mode = INTEL_LEGACY_64B_CONTEXT;
-       desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
-       if (IS_GEN(i915, 8))
-               desc |= GEN8_CTX_L3LLC_COHERENT;
-
-       /* TODO: WaDisableLiteRestore when we start using semaphore
-        * signalling between Command Streamers
-        * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
-        */
-
-       return desc;
-}
-
 static struct i915_gem_context *
-__create_context(struct drm_i915_private *dev_priv)
+__create_context(struct drm_i915_private *i915)
 {
        struct i915_gem_context *ctx;
        struct i915_gem_engines *e;
@@ -433,8 +412,8 @@ __create_context(struct drm_i915_private *dev_priv)
                return ERR_PTR(-ENOMEM);
 
        kref_init(&ctx->ref);
-       list_add_tail(&ctx->link, &dev_priv->contexts.list);
-       ctx->i915 = dev_priv;
+       list_add_tail(&ctx->link, &i915->contexts.list);
+       ctx->i915 = i915;
        ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
        mutex_init(&ctx->mutex);
 
@@ -452,14 +431,12 @@ __create_context(struct drm_i915_private *dev_priv)
        /* NB: Mark all slices as needing a remap so that when the context first
         * loads it will restore whatever remap state already exists. If there
         * is no remap info, it will be a NOP. */
-       ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+       ctx->remap_slice = ALL_L3_SLICES(i915);
 
        i915_gem_context_set_bannable(ctx);
        i915_gem_context_set_recoverable(ctx);
 
        ctx->ring_size = 4 * PAGE_SIZE;
-       ctx->desc_template =
-               default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
 
        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -471,26 +448,35 @@ __create_context(struct drm_i915_private *dev_priv)
        return ERR_PTR(err);
 }
 
-static struct i915_hw_ppgtt *
-__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
+static struct i915_address_space *
+__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
-       struct i915_hw_ppgtt *old = ctx->ppgtt;
+       struct i915_address_space *old = ctx->vm;
+       struct i915_gem_engines_iter it;
+       struct intel_context *ce;
+
+       GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
+
+       ctx->vm = i915_vm_get(vm);
 
-       ctx->ppgtt = i915_ppgtt_get(ppgtt);
-       ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
+       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+               i915_vm_put(ce->vm);
+               ce->vm = i915_vm_get(vm);
+       }
+       i915_gem_context_unlock_engines(ctx);
 
        return old;
 }
 
 static void __assign_ppgtt(struct i915_gem_context *ctx,
-                          struct i915_hw_ppgtt *ppgtt)
+                          struct i915_address_space *vm)
 {
-       if (ppgtt == ctx->ppgtt)
+       if (vm == ctx->vm)
                return;
 
-       ppgtt = __set_ppgtt(ctx, ppgtt);
-       if (ppgtt)
-               i915_ppgtt_put(ppgtt);
+       vm = __set_ppgtt(ctx, vm);
+       if (vm)
+               i915_vm_put(vm);
 }
 
 static struct i915_gem_context *
@@ -512,7 +498,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
                return ctx;
 
        if (HAS_FULL_PPGTT(dev_priv)) {
-               struct i915_hw_ppgtt *ppgtt;
+               struct i915_ppgtt *ppgtt;
 
                ppgtt = i915_ppgtt_create(dev_priv);
                if (IS_ERR(ppgtt)) {
@@ -522,14 +508,14 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
                        return ERR_CAST(ppgtt);
                }
 
-               __assign_ppgtt(ctx, ppgtt);
-               i915_ppgtt_put(ppgtt);
+               __assign_ppgtt(ctx, &ppgtt->vm);
+               i915_vm_put(&ppgtt->vm);
        }
 
        if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
-               struct i915_timeline *timeline;
+               struct intel_timeline *timeline;
 
-               timeline = i915_timeline_create(dev_priv, NULL);
+               timeline = intel_timeline_create(&dev_priv->gt, NULL);
                if (IS_ERR(timeline)) {
                        context_close(ctx);
                        return ERR_CAST(timeline);
@@ -643,20 +629,13 @@ static void init_contexts(struct drm_i915_private *i915)
        init_llist_head(&i915->contexts.free_list);
 }
 
-static bool needs_preempt_context(struct drm_i915_private *i915)
-{
-       return HAS_EXECLISTS(i915);
-}
-
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
        struct i915_gem_context *ctx;
 
        /* Reassure ourselves we are only called once */
        GEM_BUG_ON(dev_priv->kernel_context);
-       GEM_BUG_ON(dev_priv->preempt_context);
 
-       intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
        init_contexts(dev_priv);
 
        /* lowest priority; idle task */
@@ -676,38 +655,16 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
        dev_priv->kernel_context = ctx;
 
-       /* highest priority; preempting task */
-       if (needs_preempt_context(dev_priv)) {
-               ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
-               if (!IS_ERR(ctx))
-                       dev_priv->preempt_context = ctx;
-               else
-                       DRM_ERROR("Failed to create preempt context; disabling preemption\n");
-       }
-
        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         DRIVER_CAPS(dev_priv)->has_logical_contexts ?
                         "logical" : "fake");
        return 0;
 }
 
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-       for_each_engine(engine, dev_priv, id)
-               intel_engine_lost_context(engine);
-}
-
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
        lockdep_assert_held(&i915->drm.struct_mutex);
 
-       if (i915->preempt_context)
-               destroy_kernel_context(&i915->preempt_context);
        destroy_kernel_context(&i915->kernel_context);
 
        /* Must free all deferred contexts (via flush_workqueue) first */
@@ -723,7 +680,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 
 static int vm_idr_cleanup(int id, void *p, void *data)
 {
-       i915_ppgtt_put(p);
+       i915_vm_put(p);
        return 0;
 }
 
@@ -733,8 +690,8 @@ static int gem_context_register(struct i915_gem_context *ctx,
        int ret;
 
        ctx->file_priv = fpriv;
-       if (ctx->ppgtt)
-               ctx->ppgtt->vm.file = fpriv;
+       if (ctx->vm)
+               ctx->vm->file = fpriv;
 
        ctx->pid = get_task_pid(current, PIDTYPE_PID);
        ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
@@ -817,7 +774,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_vm_control *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct i915_hw_ppgtt *ppgtt;
+       struct i915_ppgtt *ppgtt;
        int err;
 
        if (!HAS_FULL_PPGTT(i915))
@@ -844,7 +801,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
        if (err)
                goto err_put;
 
-       err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+       err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
        if (err < 0)
                goto err_unlock;
 
@@ -858,7 +815,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
 err_unlock:
        mutex_unlock(&file_priv->vm_idr_lock);
 err_put:
-       i915_ppgtt_put(ppgtt);
+       i915_vm_put(&ppgtt->vm);
        return err;
 }
 
@@ -867,7 +824,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_vm_control *args = data;
-       struct i915_hw_ppgtt *ppgtt;
+       struct i915_address_space *vm;
        int err;
        u32 id;
 
@@ -885,13 +842,13 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
        if (err)
                return err;
 
-       ppgtt = idr_remove(&file_priv->vm_idr, id);
+       vm = idr_remove(&file_priv->vm_idr, id);
 
        mutex_unlock(&file_priv->vm_idr_lock);
-       if (!ppgtt)
+       if (!vm)
                return -ENOENT;
 
-       i915_ppgtt_put(ppgtt);
+       i915_vm_put(vm);
        return 0;
 }
 
@@ -933,8 +890,12 @@ static int context_barrier_task(struct i915_gem_context *ctx,
        if (!cb)
                return -ENOMEM;
 
-       i915_active_init(i915, &cb->base, cb_retire);
-       i915_active_acquire(&cb->base);
+       i915_active_init(i915, &cb->base, NULL, cb_retire);
+       err = i915_active_acquire(&cb->base);
+       if (err) {
+               kfree(cb);
+               return err;
+       }
 
        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                struct i915_request *rq;
@@ -981,10 +942,10 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
                     struct i915_gem_context *ctx,
                     struct drm_i915_gem_context_param *args)
 {
-       struct i915_hw_ppgtt *ppgtt;
+       struct i915_address_space *vm;
        int ret;
 
-       if (!ctx->ppgtt)
+       if (!ctx->vm)
                return -ENODEV;
 
        /* XXX rcu acquire? */
@@ -992,19 +953,19 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
        if (ret)
                return ret;
 
-       ppgtt = i915_ppgtt_get(ctx->ppgtt);
+       vm = i915_vm_get(ctx->vm);
        mutex_unlock(&ctx->i915->drm.struct_mutex);
 
        ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
        if (ret)
                goto err_put;
 
-       ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+       ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
        GEM_BUG_ON(!ret);
        if (ret < 0)
                goto err_unlock;
 
-       i915_ppgtt_get(ppgtt);
+       i915_vm_get(vm);
 
        args->size = 0;
        args->value = ret;
@@ -1013,30 +974,31 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
 err_unlock:
        mutex_unlock(&file_priv->vm_idr_lock);
 err_put:
-       i915_ppgtt_put(ppgtt);
+       i915_vm_put(vm);
        return ret;
 }
 
 static void set_ppgtt_barrier(void *data)
 {
-       struct i915_hw_ppgtt *old = data;
+       struct i915_address_space *old = data;
 
-       if (INTEL_GEN(old->vm.i915) < 8)
-               gen6_ppgtt_unpin_all(old);
+       if (INTEL_GEN(old->i915) < 8)
+               gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
 
-       i915_ppgtt_put(old);
+       i915_vm_put(old);
 }
 
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
-       struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
+       struct i915_address_space *vm = rq->hw_context->vm;
        struct intel_engine_cs *engine = rq->engine;
        u32 base = engine->mmio_base;
        u32 *cs;
        int i;
 
-       if (i915_vm_is_4lvl(&ppgtt->vm)) {
-               const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
+       if (i915_vm_is_4lvl(vm)) {
+               struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+               const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
 
                cs = intel_ring_begin(rq, 6);
                if (IS_ERR(cs))
@@ -1052,6 +1014,8 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
                *cs++ = MI_NOOP;
                intel_ring_advance(rq, cs);
        } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
+               struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
                cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);
@@ -1069,7 +1033,7 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
                intel_ring_advance(rq, cs);
        } else {
                /* ppGTT is not part of the legacy context image */
-               gen6_ppgtt_pin(ppgtt);
+               gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
        }
 
        return 0;
@@ -1087,13 +1051,13 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
                     struct i915_gem_context *ctx,
                     struct drm_i915_gem_context_param *args)
 {
-       struct i915_hw_ppgtt *ppgtt, *old;
+       struct i915_address_space *vm, *old;
        int err;
 
        if (args->size)
                return -EINVAL;
 
-       if (!ctx->ppgtt)
+       if (!ctx->vm)
                return -ENODEV;
 
        if (upper_32_bits(args->value))
@@ -1103,18 +1067,18 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
        if (err)
                return err;
 
-       ppgtt = idr_find(&file_priv->vm_idr, args->value);
-       if (ppgtt)
-               i915_ppgtt_get(ppgtt);
+       vm = idr_find(&file_priv->vm_idr, args->value);
+       if (vm)
+               i915_vm_get(vm);
        mutex_unlock(&file_priv->vm_idr_lock);
-       if (!ppgtt)
+       if (!vm)
                return -ENOENT;
 
        err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
        if (err)
                goto out;
 
-       if (ppgtt == ctx->ppgtt)
+       if (vm == ctx->vm)
                goto unlock;
 
        /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
@@ -1122,7 +1086,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
        lut_close(ctx);
        mutex_unlock(&ctx->mutex);
 
-       old = __set_ppgtt(ctx, ppgtt);
+       old = __set_ppgtt(ctx, vm);
 
        /*
         * We need to flush any requests using the current ppgtt before
@@ -1135,16 +1099,15 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
                                   set_ppgtt_barrier,
                                   old);
        if (err) {
-               ctx->ppgtt = old;
-               ctx->desc_template = default_desc_template(ctx->i915, old);
-               i915_ppgtt_put(ppgtt);
+               i915_vm_put(__set_ppgtt(ctx, old));
+               i915_vm_put(old);
        }
 
 unlock:
        mutex_unlock(&ctx->i915->drm.struct_mutex);
 
 out:
-       i915_ppgtt_put(ppgtt);
+       i915_vm_put(vm);
        return err;
 }
 
@@ -1194,27 +1157,11 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
-       /* Queue this switch after all other activity by this context. */
-       ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
-       if (ret)
-               goto out_add;
-
-       ret = gen8_emit_rpcs_config(rq, ce, sseu);
-       if (ret)
-               goto out_add;
-
-       /*
-        * Guarantee context image and the timeline remains pinned until the
-        * modifying request is retired by setting the ce activity tracker.
-        *
-        * But we only need to take one pin on the account of it. Or in other
-        * words transfer the pinned ce object to tracked active request.
-        */
-       if (!i915_active_request_isset(&ce->active_tracker))
-               __intel_context_pin(ce);
-       __i915_active_request_set(&ce->active_tracker, rq);
+       /* Serialise with the remote context */
+       ret = intel_context_prepare_remote_request(ce, rq);
+       if (ret == 0)
+               ret = gen8_emit_rpcs_config(rq, ce, sseu);
 
-out_add:
        i915_request_add(rq);
        return ret;
 }
@@ -1225,7 +1172,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
 {
        int ret;
 
-       GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
+       GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
 
        ret = intel_context_lock_pinned(ce);
        if (ret)
@@ -1247,7 +1194,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
 static int
 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
 {
-       struct drm_i915_private *i915 = ce->gem_context->i915;
+       struct drm_i915_private *i915 = ce->engine->i915;
        int ret;
 
        ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
@@ -1784,7 +1731,7 @@ get_engines(struct i915_gem_context *ctx,
 
                if (e->engines[n]) {
                        ci.engine_class = e->engines[n]->engine->uabi_class;
-                       ci.engine_instance = e->engines[n]->engine->instance;
+                       ci.engine_instance = e->engines[n]->engine->uabi_instance;
                }
 
                if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
@@ -2023,8 +1970,8 @@ static int clone_timeline(struct i915_gem_context *dst,
                GEM_BUG_ON(src->timeline == dst->timeline);
 
                if (dst->timeline)
-                       i915_timeline_put(dst->timeline);
-               dst->timeline = i915_timeline_get(src->timeline);
+                       intel_timeline_put(dst->timeline);
+               dst->timeline = intel_timeline_get(src->timeline);
        }
 
        return 0;
@@ -2033,15 +1980,15 @@ static int clone_timeline(struct i915_gem_context *dst,
 static int clone_vm(struct i915_gem_context *dst,
                    struct i915_gem_context *src)
 {
-       struct i915_hw_ppgtt *ppgtt;
+       struct i915_address_space *vm;
 
        rcu_read_lock();
        do {
-               ppgtt = READ_ONCE(src->ppgtt);
-               if (!ppgtt)
+               vm = READ_ONCE(src->vm);
+               if (!vm)
                        break;
 
-               if (!kref_get_unless_zero(&ppgtt->ref))
+               if (!kref_get_unless_zero(&vm->ref))
                        continue;
 
                /*
@@ -2059,16 +2006,16 @@ static int clone_vm(struct i915_gem_context *dst,
                 * it cannot be reallocated elsewhere.
                 */
 
-               if (ppgtt == READ_ONCE(src->ppgtt))
+               if (vm == READ_ONCE(src->vm))
                        break;
 
-               i915_ppgtt_put(ppgtt);
+               i915_vm_put(vm);
        } while (1);
        rcu_read_unlock();
 
-       if (ppgtt) {
-               __assign_ppgtt(dst, ppgtt);
-               i915_ppgtt_put(ppgtt);
+       if (vm) {
+               __assign_ppgtt(dst, vm);
+               i915_vm_put(vm);
        }
 
        return 0;
@@ -2149,7 +2096,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
                return -EINVAL;
 
-       ret = i915_terminally_wedged(i915);
+       ret = intel_gt_terminally_wedged(&i915->gt);
        if (ret)
                return ret;
 
@@ -2293,10 +2240,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 
        case I915_CONTEXT_PARAM_GTT_SIZE:
                args->size = 0;
-               if (ctx->ppgtt)
-                       args->value = ctx->ppgtt->vm.total;
-               else if (to_i915(dev)->mm.aliasing_ppgtt)
-                       args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+               if (ctx->vm)
+                       args->value = ctx->vm->total;
+               else if (to_i915(dev)->ggtt.alias)
+                       args->value = to_i915(dev)->ggtt.alias->vm.total;
                else
                        args->value = to_i915(dev)->ggtt.vm.total;
                break;