2 * SPDX-License-Identifier: MIT
4 * Copyright © 2011-2012 Intel Corporation
8 * This file implements HW context support. On gen5+ a HW context consists of an
9 * opaque GPU object which is referenced at times of context saves and restores.
10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
11 * from RC6 (the GPU has its own internal power context, except on gen5). Though
12 * something like a context does exist for the media ring, the code only
13 * supports contexts for the render ring.
15 * In software, there is a distinction between contexts created by the user,
16 * and the default HW context. The default HW context is used by GPU clients
17 * that do not request setup of their own hardware context. The default
18 * context's state is never restored to help prevent programming errors. This
19 * would happen if a client ran and piggy-backed off another client's GPU state.
20 * The default context only exists to give the GPU some offset to load as the
21 * current context in order to invoke a save of the context we actually care about. In fact, the
22 * code could likely be constructed, albeit in a more complicated fashion, to
23 * never use the default context, though that limits the driver's ability to
24 * swap out, and/or destroy other contexts.
26 * All other contexts are created as a request by the GPU client. These contexts
27 * store GPU state, and thus allow GPU clients to not re-emit state (and
28 * potentially query certain state) at any time. The kernel driver makes
29 * certain that the appropriate commands are inserted.
31 * The context life cycle is semi-complicated in that context BOs may live
32 * longer than the context itself because of the way the hardware, and object
33 * tracking works. Below is a very crude representation of the state machine
34 * describing the context life.
35 *                                           refcount   pincount   active
36 * S0: initial state                             0          0         0
37 * S1: context created                           1          0         0
38 * S2: context is currently running              2          1         X
39 * S3: GPU referenced, but not current           2          0         1
40 * S4: context is current, but destroyed         1          1         0
41 * S5: like S3, but destroyed                    1          0         1
43 * The most common (but not all) transitions:
44 * S0->S1: client creates a context
45 * S1->S2: client submits execbuf with context
46 * S2->S3: another client submits an execbuf with its own context
47 * S3->S1: context object was retired
48 * S3->S2: client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
53 * There are two confusing terms used above:
54 * The "current context" means the context which is currently running on the
55 * GPU. The GPU has loaded its state already and has stored away the gtt
56 * offset of the BO. The GPU is not actively referencing the data at this
57 * offset, but it will on the next context switch. The only way to avoid this
58 * is to do a GPU reset.
60 * An "active context' is one which was previously the "current context" and is
61 * on the active list waiting for the next context switch to occur. Until this
62 * happens, the object must remain at the same gtt offset. It is therefore
63 * possible for a context to be destroyed while it is still active.
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
70 #include <drm/i915_drm.h>
72 #include "gt/intel_lrc_reg.h"
73 #include "gt/intel_engine_user.h"
75 #include "i915_gem_context.h"
76 #include "i915_globals.h"
77 #include "i915_trace.h"
78 #include "i915_user_extensions.h"
80 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
82 static struct i915_global_gem_context {
83 struct i915_global base;
84 struct kmem_cache *slab_luts;
87 struct i915_lut_handle *i915_lut_handle_alloc(void)
89 return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
92 void i915_lut_handle_free(struct i915_lut_handle *lut)
94 return kmem_cache_free(global.slab_luts, lut);
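/*
 * lut_close() tears down the handle->vma lookup cache for a context that is
 * being closed: every entry in ctx->handles_vma is unlinked from its object's
 * lut_list, the per-vma open count is dropped, and the object references taken
 * when the handles were installed are released.
 */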
97 static void lut_close(struct i915_gem_context *ctx)
99 struct radix_tree_iter iter;
102 lockdep_assert_held(&ctx->mutex);
105 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
106 struct i915_vma *vma = rcu_dereference_raw(*slot);
107 struct drm_i915_gem_object *obj = vma->obj;
108 struct i915_lut_handle *lut;
110 if (!kref_get_unless_zero(&obj->base.refcount))
114 i915_gem_object_lock(obj);
115 list_for_each_entry(lut, &obj->lut_list, obj_link) {
119 if (lut->handle != iter.index)
122 list_del(&lut->obj_link);
125 i915_gem_object_unlock(obj);
128 if (&lut->obj_link != &obj->lut_list) {
129 i915_lut_handle_free(lut);
130 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
131 if (atomic_dec_and_test(&vma->open_count) &&
132 !i915_vma_is_ggtt(vma))
134 i915_gem_object_put(obj);
137 i915_gem_object_put(obj);
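/*
 * lookup_user_engine() resolves the engine named in a context param to an
 * intel_context. If the context has a user-supplied engine map, the caller
 * must pass LOOKUP_USER_INDEX and an index into that map; otherwise the
 * class/instance pair is resolved against the legacy ring map. Mixing the two
 * addressing modes is rejected with -EINVAL.
 */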
142 static struct intel_context *
143 lookup_user_engine(struct i915_gem_context *ctx,
145 const struct i915_engine_class_instance *ci)
146 #define LOOKUP_USER_INDEX BIT(0)
150 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
151 return ERR_PTR(-EINVAL);
153 if (!i915_gem_context_user_engines(ctx)) {
154 struct intel_engine_cs *engine;
156 engine = intel_engine_lookup_user(ctx->i915,
158 ci->engine_instance);
160 return ERR_PTR(-EINVAL);
164 idx = ci->engine_instance;
167 return i915_gem_context_get_engine(ctx, idx);
170 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
174 lockdep_assert_held(&i915->contexts.mutex);
176 if (INTEL_GEN(i915) >= 11)
177 max = GEN11_MAX_CONTEXT_HW_ID;
178 else if (USES_GUC_SUBMISSION(i915))
180 * When using GuC in proxy submission, GuC consumes the
181 * highest bit in the context id to indicate proxy submission.
183 max = MAX_GUC_CONTEXT_HW_ID;
185 max = MAX_CONTEXT_HW_ID;
187 return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
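/*
 * If the hw_id ida is exhausted, steal_hw_id() walks the list of contexts
 * that currently own a hw_id and unhooks the first one that is not pinned so
 * its id can be reused. Pinned contexts are rotated to the tail so the list
 * keeps its "least recently scanned" order for the next attempt.
 */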
190 static int steal_hw_id(struct drm_i915_private *i915)
192 struct i915_gem_context *ctx, *cn;
196 lockdep_assert_held(&i915->contexts.mutex);
198 list_for_each_entry_safe(ctx, cn,
199 &i915->contexts.hw_id_list, hw_id_link) {
200 if (atomic_read(&ctx->hw_id_pin_count)) {
201 list_move_tail(&ctx->hw_id_link, &pinned);
205 GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
206 list_del_init(&ctx->hw_id_link);
212 * Remember how far we got on the last repossession scan, so the
213 * list is kept in a "least recently scanned" order.
215 list_splice_tail(&pinned, &i915->contexts.hw_id_list);
219 static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
223 lockdep_assert_held(&i915->contexts.mutex);
226 * We prefer to steal/stall ourselves and our users over stalling the
227 * entire system. That may be a little unfair to our users, and
228 * even hurt high priority clients. The choice is whether to oomkill
229 * something else, or steal a context id.
231 ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
232 if (unlikely(ret < 0)) {
233 ret = steal_hw_id(i915);
234 if (ret < 0) /* once again for the correct errno code */
235 ret = new_hw_id(i915, GFP_KERNEL);
244 static void release_hw_id(struct i915_gem_context *ctx)
246 struct drm_i915_private *i915 = ctx->i915;
248 if (list_empty(&ctx->hw_id_link))
251 mutex_lock(&i915->contexts.mutex);
252 if (!list_empty(&ctx->hw_id_link)) {
253 ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
254 list_del_init(&ctx->hw_id_link);
256 mutex_unlock(&i915->contexts.mutex);
259 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
262 if (!e->engines[count])
265 intel_context_put(e->engines[count]);
270 static void free_engines(struct i915_gem_engines *e)
272 __free_engines(e, e->num_engines);
275 static void free_engines_rcu(struct rcu_head *rcu)
277 free_engines(container_of(rcu, struct i915_gem_engines, rcu));
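/*
 * default_engines() builds the initial engine map for a new context: one
 * intel_context per physical engine, indexed by engine id. This is the map
 * used whenever the client has not supplied its own via
 * I915_CONTEXT_PARAM_ENGINES.
 */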
280 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
282 struct intel_engine_cs *engine;
283 struct i915_gem_engines *e;
284 enum intel_engine_id id;
286 e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
288 return ERR_PTR(-ENOMEM);
290 init_rcu_head(&e->rcu);
291 for_each_engine(engine, ctx->i915, id) {
292 struct intel_context *ce;
294 ce = intel_context_create(ctx, engine);
296 __free_engines(e, id);
307 static void i915_gem_context_free(struct i915_gem_context *ctx)
309 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
310 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
314 i915_vm_put(ctx->vm);
316 free_engines(rcu_access_pointer(ctx->engines));
317 mutex_destroy(&ctx->engines_mutex);
320 intel_timeline_put(ctx->timeline);
325 list_del(&ctx->link);
326 mutex_destroy(&ctx->mutex);
331 static void contexts_free(struct drm_i915_private *i915)
333 struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
334 struct i915_gem_context *ctx, *cn;
336 lockdep_assert_held(&i915->drm.struct_mutex);
338 llist_for_each_entry_safe(ctx, cn, freed, free_link)
339 i915_gem_context_free(ctx);
342 static void contexts_free_first(struct drm_i915_private *i915)
344 struct i915_gem_context *ctx;
345 struct llist_node *freed;
347 lockdep_assert_held(&i915->drm.struct_mutex);
349 freed = llist_del_first(&i915->contexts.free_list);
353 ctx = container_of(freed, typeof(*ctx), free_link);
354 i915_gem_context_free(ctx);
357 static void contexts_free_worker(struct work_struct *work)
359 struct drm_i915_private *i915 =
360 container_of(work, typeof(*i915), contexts.free_work);
362 mutex_lock(&i915->drm.struct_mutex);
364 mutex_unlock(&i915->drm.struct_mutex);
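/*
 * The final reference may be dropped in a context where struct_mutex cannot be
 * taken, while i915_gem_context_free() requires it; contexts are therefore
 * pushed onto a lock-free list and freed from the contexts.free_work worker.
 */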
367 void i915_gem_context_release(struct kref *ref)
369 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
370 struct drm_i915_private *i915 = ctx->i915;
372 trace_i915_context_free(ctx);
373 if (llist_add(&ctx->free_link, &i915->contexts.free_list))
374 queue_work(i915->wq, &i915->contexts.free_work);
377 static void context_close(struct i915_gem_context *ctx)
379 mutex_lock(&ctx->mutex);
381 i915_gem_context_set_closed(ctx);
382 ctx->file_priv = ERR_PTR(-EBADF);
385 * This context will never again be assigned to HW, so we can
386 * reuse its ID for the next context.
391 * The LUT uses the VMA as a backpointer to unref the object,
392 * so we need to clear the LUT before we close all the VMA (inside
397 mutex_unlock(&ctx->mutex);
398 i915_gem_context_put(ctx);
401 static struct i915_gem_context *
402 __create_context(struct drm_i915_private *i915)
404 struct i915_gem_context *ctx;
405 struct i915_gem_engines *e;
409 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
411 return ERR_PTR(-ENOMEM);
413 kref_init(&ctx->ref);
414 list_add_tail(&ctx->link, &i915->contexts.list);
416 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
417 mutex_init(&ctx->mutex);
419 mutex_init(&ctx->engines_mutex);
420 e = default_engines(ctx);
425 RCU_INIT_POINTER(ctx->engines, e);
427 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
428 INIT_LIST_HEAD(&ctx->hw_id_link);
430 /* NB: Mark all slices as needing a remap so that when the context first
431 * loads it will restore whatever remap state already exists. If there
432 * is no remap info, it will be a NOP. */
433 ctx->remap_slice = ALL_L3_SLICES(i915);
435 i915_gem_context_set_bannable(ctx);
436 i915_gem_context_set_recoverable(ctx);
438 ctx->ring_size = 4 * PAGE_SIZE;
440 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
441 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
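/*
 * __set_ppgtt() installs a new address space on the context: it takes a
 * reference on @vm, propagates it to every engine context in the map, and
 * returns the old vm so the caller can decide when its reference is dropped.
 */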
450 static struct i915_address_space *
451 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
453 struct i915_address_space *old = ctx->vm;
454 struct i915_gem_engines_iter it;
455 struct intel_context *ce;
457 GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
459 ctx->vm = i915_vm_get(vm);
461 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
463 ce->vm = i915_vm_get(vm);
465 i915_gem_context_unlock_engines(ctx);
470 static void __assign_ppgtt(struct i915_gem_context *ctx,
471 struct i915_address_space *vm)
476 vm = __set_ppgtt(ctx, vm);
481 static struct i915_gem_context *
482 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
484 struct i915_gem_context *ctx;
486 lockdep_assert_held(&dev_priv->drm.struct_mutex);
488 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
489 !HAS_EXECLISTS(dev_priv))
490 return ERR_PTR(-EINVAL);
492 /* Reap the most stale context */
493 contexts_free_first(dev_priv);
495 ctx = __create_context(dev_priv);
499 if (HAS_FULL_PPGTT(dev_priv)) {
500 struct i915_ppgtt *ppgtt;
502 ppgtt = i915_ppgtt_create(dev_priv);
504 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
507 return ERR_CAST(ppgtt);
510 __assign_ppgtt(ctx, &ppgtt->vm);
511 i915_vm_put(&ppgtt->vm);
514 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
515 struct intel_timeline *timeline;
517 timeline = intel_timeline_create(&dev_priv->gt, NULL);
518 if (IS_ERR(timeline)) {
520 return ERR_CAST(timeline);
523 ctx->timeline = timeline;
526 trace_i915_context_create(ctx);
532 * i915_gem_context_create_gvt - create a GVT GEM context
535 * This function is used to create a GVT specific GEM context.
538 * pointer to i915_gem_context on success, error pointer if failed
541 struct i915_gem_context *
542 i915_gem_context_create_gvt(struct drm_device *dev)
544 struct i915_gem_context *ctx;
547 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
548 return ERR_PTR(-ENODEV);
550 ret = i915_mutex_lock_interruptible(dev);
554 ctx = i915_gem_create_context(to_i915(dev), 0);
558 ret = i915_gem_context_pin_hw_id(ctx);
565 ctx->file_priv = ERR_PTR(-EBADF);
566 i915_gem_context_set_closed(ctx); /* not user accessible */
567 i915_gem_context_clear_bannable(ctx);
568 i915_gem_context_set_force_single_submission(ctx);
569 if (!USES_GUC_SUBMISSION(to_i915(dev)))
570 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
572 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
574 mutex_unlock(&dev->struct_mutex);
579 destroy_kernel_context(struct i915_gem_context **ctxp)
581 struct i915_gem_context *ctx;
583 /* Keep the context ref so that we can free it immediately ourselves */
584 ctx = i915_gem_context_get(fetch_and_zero(ctxp));
585 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
588 i915_gem_context_free(ctx);
591 struct i915_gem_context *
592 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
594 struct i915_gem_context *ctx;
597 ctx = i915_gem_create_context(i915, 0);
601 err = i915_gem_context_pin_hw_id(ctx);
603 destroy_kernel_context(&ctx);
607 i915_gem_context_clear_bannable(ctx);
608 ctx->sched.priority = I915_USER_PRIORITY(prio);
609 ctx->ring_size = PAGE_SIZE;
611 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
616 static void init_contexts(struct drm_i915_private *i915)
618 mutex_init(&i915->contexts.mutex);
619 INIT_LIST_HEAD(&i915->contexts.list);
621 /* Using the simple ida interface, the max is limited by sizeof(int) */
622 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
623 BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
624 ida_init(&i915->contexts.hw_ida);
625 INIT_LIST_HEAD(&i915->contexts.hw_id_list);
627 INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
628 init_llist_head(&i915->contexts.free_list);
631 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
633 struct i915_gem_context *ctx;
635 /* Reassure ourselves we are only called once */
636 GEM_BUG_ON(dev_priv->kernel_context);
638 init_contexts(dev_priv);
640 /* lowest priority; idle task */
641 ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
643 DRM_ERROR("Failed to create default global context\n");
647 * For easy recognisability, we want the kernel context to be 0 and then
648 * all user contexts will have non-zero hw_id. Kernel contexts are
649 * permanently pinned, so that we never suffer a stall and can
650 * use them from any allocation context (e.g. for evicting other
651 * contexts and from inside the shrinker).
653 GEM_BUG_ON(ctx->hw_id);
654 GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
655 dev_priv->kernel_context = ctx;
657 DRM_DEBUG_DRIVER("%s context support initialized\n",
658 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
663 void i915_gem_contexts_fini(struct drm_i915_private *i915)
665 lockdep_assert_held(&i915->drm.struct_mutex);
667 destroy_kernel_context(&i915->kernel_context);
669 /* Must free all deferred contexts (via flush_workqueue) first */
670 GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
671 ida_destroy(&i915->contexts.hw_ida);
674 static int context_idr_cleanup(int id, void *p, void *data)
680 static int vm_idr_cleanup(int id, void *p, void *data)
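/*
 * gem_context_register() binds a new context to the opening client: it records
 * the file, pid and a debug name, then publishes the context in the client's
 * idr so userspace can address it by id from then on.
 */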
686 static int gem_context_register(struct i915_gem_context *ctx,
687 struct drm_i915_file_private *fpriv)
691 ctx->file_priv = fpriv;
693 ctx->vm->file = fpriv;
695 ctx->pid = get_task_pid(current, PIDTYPE_PID);
696 ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
697 current->comm, pid_nr(ctx->pid));
703 /* And finally expose ourselves to userspace via the idr */
704 mutex_lock(&fpriv->context_idr_lock);
705 ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
706 mutex_unlock(&fpriv->context_idr_lock);
710 kfree(fetch_and_zero(&ctx->name));
712 put_pid(fetch_and_zero(&ctx->pid));
717 int i915_gem_context_open(struct drm_i915_private *i915,
718 struct drm_file *file)
720 struct drm_i915_file_private *file_priv = file->driver_priv;
721 struct i915_gem_context *ctx;
724 mutex_init(&file_priv->context_idr_lock);
725 mutex_init(&file_priv->vm_idr_lock);
727 idr_init(&file_priv->context_idr);
728 idr_init_base(&file_priv->vm_idr, 1);
730 mutex_lock(&i915->drm.struct_mutex);
731 ctx = i915_gem_create_context(i915, 0);
732 mutex_unlock(&i915->drm.struct_mutex);
738 err = gem_context_register(ctx, file_priv);
742 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
750 idr_destroy(&file_priv->vm_idr);
751 idr_destroy(&file_priv->context_idr);
752 mutex_destroy(&file_priv->vm_idr_lock);
753 mutex_destroy(&file_priv->context_idr_lock);
757 void i915_gem_context_close(struct drm_file *file)
759 struct drm_i915_file_private *file_priv = file->driver_priv;
761 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
762 idr_destroy(&file_priv->context_idr);
763 mutex_destroy(&file_priv->context_idr_lock);
765 idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
766 idr_destroy(&file_priv->vm_idr);
767 mutex_destroy(&file_priv->vm_idr_lock);
770 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
771 struct drm_file *file)
773 struct drm_i915_private *i915 = to_i915(dev);
774 struct drm_i915_gem_vm_control *args = data;
775 struct drm_i915_file_private *file_priv = file->driver_priv;
776 struct i915_ppgtt *ppgtt;
779 if (!HAS_FULL_PPGTT(i915))
785 ppgtt = i915_ppgtt_create(i915);
787 return PTR_ERR(ppgtt);
789 ppgtt->vm.file = file_priv;
791 if (args->extensions) {
792 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
799 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
803 err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
807 GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
809 mutex_unlock(&file_priv->vm_idr_lock);
815 mutex_unlock(&file_priv->vm_idr_lock);
817 i915_vm_put(&ppgtt->vm);
821 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
822 struct drm_file *file)
824 struct drm_i915_file_private *file_priv = file->driver_priv;
825 struct drm_i915_gem_vm_control *args = data;
826 struct i915_address_space *vm;
833 if (args->extensions)
840 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
844 vm = idr_remove(&file_priv->vm_idr, id);
846 mutex_unlock(&file_priv->vm_idr_lock);
854 struct context_barrier_task {
855 struct i915_active base;
856 void (*task)(void *data);
860 static void cb_retire(struct i915_active *base)
862 struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
867 i915_active_fini(&cb->base);
871 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
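/*
 * context_barrier_task() emits a request (optionally augmented by @emit) on
 * each engine of the context selected by @engines and not filtered out by
 * @skip, and arranges for @task to run from the barrier's retirement callback
 * once all of those requests have completed. This is used to defer work, such
 * as dropping an old ppgtt, until the GPU has finished with the state being
 * replaced.
 */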
872 static int context_barrier_task(struct i915_gem_context *ctx,
873 intel_engine_mask_t engines,
874 bool (*skip)(struct intel_context *ce, void *data),
875 int (*emit)(struct i915_request *rq, void *data),
876 void (*task)(void *data),
879 struct drm_i915_private *i915 = ctx->i915;
880 struct context_barrier_task *cb;
881 struct i915_gem_engines_iter it;
882 struct intel_context *ce;
885 lockdep_assert_held(&i915->drm.struct_mutex);
888 cb = kmalloc(sizeof(*cb), GFP_KERNEL);
892 i915_active_init(i915, &cb->base, NULL, cb_retire);
893 err = i915_active_acquire(&cb->base);
899 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
900 struct i915_request *rq;
902 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
908 if (!(ce->engine->mask & engines))
911 if (skip && skip(ce, data))
914 rq = intel_context_create_request(ce);
922 err = emit(rq, data);
924 err = i915_active_ref(&cb->base, rq->fence.context, rq);
926 i915_request_add(rq);
930 i915_gem_context_unlock_engines(ctx);
932 cb->task = err ? NULL : task; /* caller needs to unwind instead */
935 i915_active_release(&cb->base);
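/*
 * get_ppgtt() implements the read side of I915_CONTEXT_PARAM_VM: the context's
 * current vm is registered in the client's vm_idr and the resulting id is
 * returned to userspace in args->value.
 */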
940 static int get_ppgtt(struct drm_i915_file_private *file_priv,
941 struct i915_gem_context *ctx,
942 struct drm_i915_gem_context_param *args)
944 struct i915_address_space *vm;
950 /* XXX rcu acquire? */
951 ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
955 vm = i915_vm_get(ctx->vm);
956 mutex_unlock(&ctx->i915->drm.struct_mutex);
958 ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
962 ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
974 mutex_unlock(&file_priv->vm_idr_lock);
980 static void set_ppgtt_barrier(void *data)
982 struct i915_address_space *old = data;
984 if (INTEL_GEN(old->i915) < 8)
985 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
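/*
 * emit_ppgtt_update() loads the new page directory into the engine: a single
 * PDP pair for a 4-level ppgtt, all GEN8_3LVL_PDPES pairs via LRI on execlists
 * platforms with a 3-level ppgtt, and on the legacy ringbuffer path the ppgtt
 * is pinned here instead since it is not part of the context image.
 */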
990 static int emit_ppgtt_update(struct i915_request *rq, void *data)
992 struct i915_address_space *vm = rq->hw_context->vm;
993 struct intel_engine_cs *engine = rq->engine;
994 u32 base = engine->mmio_base;
998 if (i915_vm_is_4lvl(vm)) {
999 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1000 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1002 cs = intel_ring_begin(rq, 6);
1006 *cs++ = MI_LOAD_REGISTER_IMM(2);
1008 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1009 *cs++ = upper_32_bits(pd_daddr);
1010 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1011 *cs++ = lower_32_bits(pd_daddr);
1014 intel_ring_advance(rq, cs);
1015 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1016 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1018 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1022 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1023 for (i = GEN8_3LVL_PDPES; i--; ) {
1024 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1026 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1027 *cs++ = upper_32_bits(pd_daddr);
1028 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1029 *cs++ = lower_32_bits(pd_daddr);
1032 intel_ring_advance(rq, cs);
1034 /* ppGTT is not part of the legacy context image */
1035 gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
1041 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1043 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1046 return !atomic_read(&ce->pin_count);
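/*
 * set_ppgtt() implements I915_CONTEXT_PARAM_VM: look up the client's vm by id
 * and swap it into the context, emitting the ppgtt update on every engine via
 * a context barrier (see below) before the old vm can be released.
 */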
1049 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1050 struct i915_gem_context *ctx,
1051 struct drm_i915_gem_context_param *args)
1053 struct i915_address_space *vm, *old;
1062 if (upper_32_bits(args->value))
1065 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1069 vm = idr_find(&file_priv->vm_idr, args->value);
1072 mutex_unlock(&file_priv->vm_idr_lock);
1076 err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1083 /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1084 mutex_lock(&ctx->mutex);
1086 mutex_unlock(&ctx->mutex);
1088 old = __set_ppgtt(ctx, vm);
1091 * We need to flush any requests using the current ppgtt before
1092 * we release it as the requests do not hold a reference themselves,
1093 * only indirectly through the context.
1095 err = context_barrier_task(ctx, ALL_ENGINES,
1101 i915_vm_put(__set_ppgtt(ctx, old));
1106 mutex_unlock(&ctx->i915->drm.struct_mutex);
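/*
 * gen8_emit_rpcs_config() rewrites R_PWR_CLK_STATE in the target context
 * image: a store-dword through the GGTT is pointed at the register's slot in
 * the saved context state and filled with the RPCS value computed from the
 * requested sseu configuration.
 */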
1113 static int gen8_emit_rpcs_config(struct i915_request *rq,
1114 struct intel_context *ce,
1115 struct intel_sseu sseu)
1120 cs = intel_ring_begin(rq, 4);
1124 offset = i915_ggtt_offset(ce->state) +
1125 LRC_STATE_PN * PAGE_SIZE +
1126 (CTX_R_PWR_CLK_STATE + 1) * 4;
1128 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1129 *cs++ = lower_32_bits(offset);
1130 *cs++ = upper_32_bits(offset);
1131 *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1133 intel_ring_advance(rq, cs);
1139 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1141 struct i915_request *rq;
1144 lockdep_assert_held(&ce->pin_mutex);
1147 * If the context is not idle, we have to submit an ordered request to
1148 * modify its context image via the kernel context (writing to our own
1149 * image, or into the registers directly, does not stick). Pristine
1150 * and idle contexts will be configured on pinning.
1152 if (!intel_context_is_pinned(ce))
1155 rq = i915_request_create(ce->engine->kernel_context);
1159 /* Serialise with the remote context */
1160 ret = intel_context_prepare_remote_request(ce, rq);
1162 ret = gen8_emit_rpcs_config(rq, ce, sseu);
1164 i915_request_add(rq);
1169 __intel_context_reconfigure_sseu(struct intel_context *ce,
1170 struct intel_sseu sseu)
1174 GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
1176 ret = intel_context_lock_pinned(ce);
1180 /* Nothing to do if unmodified. */
1181 if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1184 ret = gen8_modify_rpcs(ce, sseu);
1189 intel_context_unlock_pinned(ce);
1194 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1196 struct drm_i915_private *i915 = ce->engine->i915;
1199 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1203 ret = __intel_context_reconfigure_sseu(ce, sseu);
1205 mutex_unlock(&i915->drm.struct_mutex);
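/*
 * user_to_context_sseu() validates the user-requested slice/subslice/EU
 * configuration against the device topology and copies it into the internal
 * intel_sseu representation, applying the gen11-specific restrictions below.
 */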
1211 user_to_context_sseu(struct drm_i915_private *i915,
1212 const struct drm_i915_gem_context_param_sseu *user,
1213 struct intel_sseu *context)
1215 const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1217 /* No zeros in any field. */
1218 if (!user->slice_mask || !user->subslice_mask ||
1219 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1223 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1227 * Some future proofing on the types since the uAPI is wider than the
1228 * current internal implementation.
1230 if (overflows_type(user->slice_mask, context->slice_mask) ||
1231 overflows_type(user->subslice_mask, context->subslice_mask) ||
1232 overflows_type(user->min_eus_per_subslice,
1233 context->min_eus_per_subslice) ||
1234 overflows_type(user->max_eus_per_subslice,
1235 context->max_eus_per_subslice))
1238 /* Check validity against hardware. */
1239 if (user->slice_mask & ~device->slice_mask)
1242 if (user->subslice_mask & ~device->subslice_mask[0])
1245 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1248 context->slice_mask = user->slice_mask;
1249 context->subslice_mask = user->subslice_mask;
1250 context->min_eus_per_subslice = user->min_eus_per_subslice;
1251 context->max_eus_per_subslice = user->max_eus_per_subslice;
1253 /* Part specific restrictions. */
1254 if (IS_GEN(i915, 11)) {
1255 unsigned int hw_s = hweight8(device->slice_mask);
1256 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1257 unsigned int req_s = hweight8(context->slice_mask);
1258 unsigned int req_ss = hweight8(context->subslice_mask);
1261 * Only full subslice enablement is possible if more than one
1262 * slice is turned on.
1264 if (req_s > 1 && req_ss != hw_ss_per_s)
1268 * If more than four (SScount bitfield limit) subslices are
1269 * requested then the number has to be even.
1271 if (req_ss > 4 && (req_ss & 1))
1275 * If only one slice is enabled and subslice count is below the
1276 * device full enablement, it must be at most half of the all
1277 * available subslices.
1279 if (req_s == 1 && req_ss < hw_ss_per_s &&
1280 req_ss > (hw_ss_per_s / 2))
1283 /* ABI restriction - VME use case only. */
1285 /* All slices or one slice only. */
1286 if (req_s != 1 && req_s != hw_s)
1290 * Half subslices or full enablement only when one slice is
1294 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1297 /* No EU configuration changes. */
1298 if ((user->min_eus_per_subslice !=
1299 device->max_eus_per_subslice) ||
1300 (user->max_eus_per_subslice !=
1301 device->max_eus_per_subslice))
1308 static int set_sseu(struct i915_gem_context *ctx,
1309 struct drm_i915_gem_context_param *args)
1311 struct drm_i915_private *i915 = ctx->i915;
1312 struct drm_i915_gem_context_param_sseu user_sseu;
1313 struct intel_context *ce;
1314 struct intel_sseu sseu;
1315 unsigned long lookup;
1318 if (args->size < sizeof(user_sseu))
1321 if (!IS_GEN(i915, 11))
1324 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1331 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1335 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1336 lookup |= LOOKUP_USER_INDEX;
1338 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1342 /* Only render engine supports RPCS configuration. */
1343 if (ce->engine->class != RENDER_CLASS) {
1348 ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1352 ret = intel_context_reconfigure_sseu(ce, sseu);
1356 args->size = sizeof(user_sseu);
1359 intel_context_put(ce);
1363 struct set_engines {
1364 struct i915_gem_context *ctx;
1365 struct i915_gem_engines *engines;
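/*
 * I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE: build a virtual engine from the
 * user-supplied list of sibling engines and install it at the requested index
 * of the engine map being constructed.
 */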
1369 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1371 struct i915_context_engines_load_balance __user *ext =
1372 container_of_user(base, typeof(*ext), base);
1373 const struct set_engines *set = data;
1374 struct intel_engine_cs *stack[16];
1375 struct intel_engine_cs **siblings;
1376 struct intel_context *ce;
1377 u16 num_siblings, idx;
1381 if (!HAS_EXECLISTS(set->ctx->i915))
1384 if (USES_GUC_SUBMISSION(set->ctx->i915))
1385 return -ENODEV; /* not implemented yet */
1387 if (get_user(idx, &ext->engine_index))
1390 if (idx >= set->engines->num_engines) {
1391 DRM_DEBUG("Invalid placement value, %d >= %d\n",
1392 idx, set->engines->num_engines);
1396 idx = array_index_nospec(idx, set->engines->num_engines);
1397 if (set->engines->engines[idx]) {
1398 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1402 if (get_user(num_siblings, &ext->num_siblings))
1405 err = check_user_mbz(&ext->flags);
1409 err = check_user_mbz(&ext->mbz64);
1414 if (num_siblings > ARRAY_SIZE(stack)) {
1415 siblings = kmalloc_array(num_siblings,
1422 for (n = 0; n < num_siblings; n++) {
1423 struct i915_engine_class_instance ci;
1425 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1430 siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1432 ci.engine_instance);
1434 DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1435 n, ci.engine_class, ci.engine_instance);
1441 ce = intel_execlists_create_virtual(set->ctx, siblings, n);
1447 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1448 intel_context_put(ce);
1454 if (siblings != stack)
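/*
 * I915_CONTEXT_ENGINES_EXT_BOND: associate a set of allowed sibling engines
 * with a virtual engine so that, when a submit fence nominates the given
 * master engine, the bonded request is restricted to those siblings.
 */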
1461 set_engines__bond(struct i915_user_extension __user *base, void *data)
1463 struct i915_context_engines_bond __user *ext =
1464 container_of_user(base, typeof(*ext), base);
1465 const struct set_engines *set = data;
1466 struct i915_engine_class_instance ci;
1467 struct intel_engine_cs *virtual;
1468 struct intel_engine_cs *master;
1472 if (get_user(idx, &ext->virtual_index))
1475 if (idx >= set->engines->num_engines) {
1476 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1477 idx, set->engines->num_engines);
1481 idx = array_index_nospec(idx, set->engines->num_engines);
1482 if (!set->engines->engines[idx]) {
1483 DRM_DEBUG("Invalid engine at %d\n", idx);
1486 virtual = set->engines->engines[idx]->engine;
1488 err = check_user_mbz(&ext->flags);
1492 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1493 err = check_user_mbz(&ext->mbz64[n]);
1498 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1501 master = intel_engine_lookup_user(set->ctx->i915,
1502 ci.engine_class, ci.engine_instance);
1504 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1505 ci.engine_class, ci.engine_instance);
1509 if (get_user(num_bonds, &ext->num_bonds))
1512 for (n = 0; n < num_bonds; n++) {
1513 struct intel_engine_cs *bond;
1515 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1518 bond = intel_engine_lookup_user(set->ctx->i915,
1520 ci.engine_instance);
1522 DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1523 n, ci.engine_class, ci.engine_instance);
1528 * A non-virtual engine has no siblings to choose between; and
1529 * a submit fence will always be directed to the one engine.
1531 if (intel_engine_is_virtual(virtual)) {
1532 err = intel_virtual_engine_attach_bond(virtual,
1543 static const i915_user_extension_fn set_engines__extensions[] = {
1544 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1545 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1549 set_engines(struct i915_gem_context *ctx,
1550 const struct drm_i915_gem_context_param *args)
1552 struct i915_context_param_engines __user *user =
1553 u64_to_user_ptr(args->value);
1554 struct set_engines set = { .ctx = ctx };
1555 unsigned int num_engines, n;
1559 if (!args->size) { /* switch back to legacy user_ring_map */
1560 if (!i915_gem_context_user_engines(ctx))
1563 set.engines = default_engines(ctx);
1564 if (IS_ERR(set.engines))
1565 return PTR_ERR(set.engines);
1570 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1571 if (args->size < sizeof(*user) ||
1572 !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1573 DRM_DEBUG("Invalid size for engine array: %d\n",
1579 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1580 * first 64 engines defined here.
1582 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1584 set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1589 init_rcu_head(&set.engines->rcu);
1590 for (n = 0; n < num_engines; n++) {
1591 struct i915_engine_class_instance ci;
1592 struct intel_engine_cs *engine;
1594 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1595 __free_engines(set.engines, n);
1599 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1600 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1601 set.engines->engines[n] = NULL;
1605 engine = intel_engine_lookup_user(ctx->i915,
1607 ci.engine_instance);
1609 DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1610 n, ci.engine_class, ci.engine_instance);
1611 __free_engines(set.engines, n);
1615 set.engines->engines[n] = intel_context_create(ctx, engine);
1616 if (IS_ERR(set.engines->engines[n])) {
1617 __free_engines(set.engines, n);
1621 set.engines->num_engines = num_engines;
1624 if (!get_user(extensions, &user->extensions))
1625 err = i915_user_extensions(u64_to_user_ptr(extensions),
1626 set_engines__extensions,
1627 ARRAY_SIZE(set_engines__extensions),
1630 free_engines(set.engines);
1635 mutex_lock(&ctx->engines_mutex);
1637 i915_gem_context_set_user_engines(ctx);
1639 i915_gem_context_clear_user_engines(ctx);
1640 rcu_swap_protected(ctx->engines, set.engines, 1);
1641 mutex_unlock(&ctx->engines_mutex);
1643 call_rcu(&set.engines->rcu, free_engines_rcu);
1648 static struct i915_gem_engines *
1649 __copy_engines(struct i915_gem_engines *e)
1651 struct i915_gem_engines *copy;
1654 copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1656 return ERR_PTR(-ENOMEM);
1658 init_rcu_head(©->rcu);
1659 for (n = 0; n < e->num_engines; n++) {
1661 copy->engines[n] = intel_context_get(e->engines[n]);
1663 copy->engines[n] = NULL;
1665 copy->num_engines = n;
1671 get_engines(struct i915_gem_context *ctx,
1672 struct drm_i915_gem_context_param *args)
1674 struct i915_context_param_engines __user *user;
1675 struct i915_gem_engines *e;
1676 size_t n, count, size;
1679 err = mutex_lock_interruptible(&ctx->engines_mutex);
1684 if (i915_gem_context_user_engines(ctx))
1685 e = __copy_engines(i915_gem_context_engines(ctx));
1686 mutex_unlock(&ctx->engines_mutex);
1687 if (IS_ERR_OR_NULL(e)) {
1689 return PTR_ERR_OR_ZERO(e);
1692 count = e->num_engines;
1694 /* Be paranoid in case we have an impedance mismatch */
1695 if (!check_struct_size(user, engines, count, &size)) {
1699 if (overflows_type(size, args->size)) {
1709 if (args->size < size) {
1714 user = u64_to_user_ptr(args->value);
1715 if (!access_ok(user, size)) {
1720 if (put_user(0, &user->extensions)) {
1725 for (n = 0; n < count; n++) {
1726 struct i915_engine_class_instance ci = {
1727 .engine_class = I915_ENGINE_CLASS_INVALID,
1728 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1731 if (e->engines[n]) {
1732 ci.engine_class = e->engines[n]->engine->uabi_class;
1733 ci.engine_instance = e->engines[n]->engine->uabi_instance;
1736 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1749 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1750 struct i915_gem_context *ctx,
1751 struct drm_i915_gem_context_param *args)
1755 switch (args->param) {
1756 case I915_CONTEXT_PARAM_NO_ZEROMAP:
1759 else if (args->value)
1760 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1762 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1765 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1768 else if (args->value)
1769 i915_gem_context_set_no_error_capture(ctx);
1771 i915_gem_context_clear_no_error_capture(ctx);
1774 case I915_CONTEXT_PARAM_BANNABLE:
1777 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1779 else if (args->value)
1780 i915_gem_context_set_bannable(ctx);
1782 i915_gem_context_clear_bannable(ctx);
1785 case I915_CONTEXT_PARAM_RECOVERABLE:
1788 else if (args->value)
1789 i915_gem_context_set_recoverable(ctx);
1791 i915_gem_context_clear_recoverable(ctx);
1794 case I915_CONTEXT_PARAM_PRIORITY:
1796 s64 priority = args->value;
1800 else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1802 else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1803 priority < I915_CONTEXT_MIN_USER_PRIORITY)
1805 else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1806 !capable(CAP_SYS_NICE))
1809 ctx->sched.priority =
1810 I915_USER_PRIORITY(priority);
1814 case I915_CONTEXT_PARAM_SSEU:
1815 ret = set_sseu(ctx, args);
1818 case I915_CONTEXT_PARAM_VM:
1819 ret = set_ppgtt(fpriv, ctx, args);
1822 case I915_CONTEXT_PARAM_ENGINES:
1823 ret = set_engines(ctx, args);
1826 case I915_CONTEXT_PARAM_BAN_PERIOD:
1836 struct i915_gem_context *ctx;
1837 struct drm_i915_file_private *fpriv;
1840 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1842 struct drm_i915_gem_context_create_ext_setparam local;
1843 const struct create_ext *arg = data;
1845 if (copy_from_user(&local, ext, sizeof(local)))
1848 if (local.param.ctx_id)
1851 return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1854 static int clone_engines(struct i915_gem_context *dst,
1855 struct i915_gem_context *src)
1857 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1858 struct i915_gem_engines *clone;
1862 clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1866 init_rcu_head(&clone->rcu);
1867 for (n = 0; n < e->num_engines; n++) {
1868 struct intel_engine_cs *engine;
1870 if (!e->engines[n]) {
1871 clone->engines[n] = NULL;
1874 engine = e->engines[n]->engine;
1877 * Virtual engines are singletons; they can only exist
1878 * inside a single context, because they embed their
1879 * HW context... As each virtual context implies a single
1880 * timeline (each engine can only dequeue a single request
1881 * at any time), it would be surprising for two contexts
1882 * to use the same engine. So let's create a copy of
1883 * the virtual engine instead.
1885 if (intel_engine_is_virtual(engine))
1887 intel_execlists_clone_virtual(dst, engine);
1889 clone->engines[n] = intel_context_create(dst, engine);
1890 if (IS_ERR_OR_NULL(clone->engines[n])) {
1891 __free_engines(clone, n);
1895 clone->num_engines = n;
1897 user_engines = i915_gem_context_user_engines(src);
1898 i915_gem_context_unlock_engines(src);
1900 free_engines(dst->engines);
1901 RCU_INIT_POINTER(dst->engines, clone);
1903 i915_gem_context_set_user_engines(dst);
1905 i915_gem_context_clear_user_engines(dst);
1909 i915_gem_context_unlock_engines(src);
1913 static int clone_flags(struct i915_gem_context *dst,
1914 struct i915_gem_context *src)
1916 dst->user_flags = src->user_flags;
1920 static int clone_schedattr(struct i915_gem_context *dst,
1921 struct i915_gem_context *src)
1923 dst->sched = src->sched;
1927 static int clone_sseu(struct i915_gem_context *dst,
1928 struct i915_gem_context *src)
1930 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1931 struct i915_gem_engines *clone;
1935 clone = dst->engines; /* no locking required; sole access */
1936 if (e->num_engines != clone->num_engines) {
1941 for (n = 0; n < e->num_engines; n++) {
1942 struct intel_context *ce = e->engines[n];
1944 if (clone->engines[n]->engine->class != ce->engine->class) {
1945 /* Must have compatible engine maps! */
1950 /* serialises with set_sseu */
1951 err = intel_context_lock_pinned(ce);
1955 clone->engines[n]->sseu = ce->sseu;
1956 intel_context_unlock_pinned(ce);
1961 i915_gem_context_unlock_engines(src);
1965 static int clone_timeline(struct i915_gem_context *dst,
1966 struct i915_gem_context *src)
1968 if (src->timeline) {
1969 GEM_BUG_ON(src->timeline == dst->timeline);
1972 intel_timeline_put(dst->timeline);
1973 dst->timeline = intel_timeline_get(src->timeline);
1979 static int clone_vm(struct i915_gem_context *dst,
1980 struct i915_gem_context *src)
1982 struct i915_address_space *vm;
1986 vm = READ_ONCE(src->vm);
1990 if (!kref_get_unless_zero(&vm->ref))
1994 * This ppgtt may have been reallocated between
1995 * the read and the kref, and reassigned to a third
1996 * context. In order to avoid inadvertent sharing
1997 * of this ppgtt with that third context (and not
1998 * src), we have to confirm that we have the same
1999 * ppgtt after passing through the strong memory
2000 * barrier implied by a successful
2001 * kref_get_unless_zero().
2003 * Once we have acquired the current ppgtt of src,
2004 * we no longer care if it is released from src, as
2005 * it cannot be reallocated elsewhere.
2008 if (vm == READ_ONCE(src->vm))
2016 __assign_ppgtt(dst, vm);
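/*
 * I915_CONTEXT_CREATE_EXT_CLONE: copy the selected aspects (engines, flags,
 * scheduling attributes, sseu, timeline, vm) from an existing context into the
 * one being created, using the table of per-bit clone functions below.
 */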
2023 static int create_clone(struct i915_user_extension __user *ext, void *data)
2025 static int (* const fn[])(struct i915_gem_context *dst,
2026 struct i915_gem_context *src) = {
2027 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2028 MAP(ENGINES, clone_engines),
2029 MAP(FLAGS, clone_flags),
2030 MAP(SCHEDATTR, clone_schedattr),
2031 MAP(SSEU, clone_sseu),
2032 MAP(TIMELINE, clone_timeline),
2036 struct drm_i915_gem_context_create_ext_clone local;
2037 const struct create_ext *arg = data;
2038 struct i915_gem_context *dst = arg->ctx;
2039 struct i915_gem_context *src;
2042 if (copy_from_user(&local, ext, sizeof(local)))
2045 BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2046 I915_CONTEXT_CLONE_UNKNOWN);
2048 if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2055 src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2060 GEM_BUG_ON(src == dst);
2062 for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2063 if (!(local.flags & BIT(bit)))
2066 err = fn[bit](dst, src);
2074 static const i915_user_extension_fn create_extensions[] = {
2075 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2076 [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2079 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2081 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2084 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2085 struct drm_file *file)
2087 struct drm_i915_private *i915 = to_i915(dev);
2088 struct drm_i915_gem_context_create_ext *args = data;
2089 struct create_ext ext_data;
2092 if (!DRIVER_CAPS(i915)->has_logical_contexts)
2095 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2098 ret = intel_gt_terminally_wedged(&i915->gt);
2102 ext_data.fpriv = file->driver_priv;
2103 if (client_is_banned(ext_data.fpriv)) {
2104 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2106 pid_nr(get_task_pid(current, PIDTYPE_PID)));
2110 ret = i915_mutex_lock_interruptible(dev);
2114 ext_data.ctx = i915_gem_create_context(i915, args->flags);
2115 mutex_unlock(&dev->struct_mutex);
2116 if (IS_ERR(ext_data.ctx))
2117 return PTR_ERR(ext_data.ctx);
2119 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2120 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2122 ARRAY_SIZE(create_extensions),
2128 ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
2133 DRM_DEBUG("HW context %d created\n", args->ctx_id);
2138 context_close(ext_data.ctx);
2142 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2143 struct drm_file *file)
2145 struct drm_i915_gem_context_destroy *args = data;
2146 struct drm_i915_file_private *file_priv = file->driver_priv;
2147 struct i915_gem_context *ctx;
2155 if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2158 ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2159 mutex_unlock(&file_priv->context_idr_lock);
2167 static int get_sseu(struct i915_gem_context *ctx,
2168 struct drm_i915_gem_context_param *args)
2170 struct drm_i915_gem_context_param_sseu user_sseu;
2171 struct intel_context *ce;
2172 unsigned long lookup;
2175 if (args->size == 0)
2177 else if (args->size < sizeof(user_sseu))
2180 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2187 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2191 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2192 lookup |= LOOKUP_USER_INDEX;
2194 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2198 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2200 intel_context_put(ce);
2204 user_sseu.slice_mask = ce->sseu.slice_mask;
2205 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2206 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2207 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2209 intel_context_unlock_pinned(ce);
2210 intel_context_put(ce);
2212 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2217 args->size = sizeof(user_sseu);
2222 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2223 struct drm_file *file)
2225 struct drm_i915_file_private *file_priv = file->driver_priv;
2226 struct drm_i915_gem_context_param *args = data;
2227 struct i915_gem_context *ctx;
2230 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2234 switch (args->param) {
2235 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2237 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2240 case I915_CONTEXT_PARAM_GTT_SIZE:
2243 args->value = ctx->vm->total;
2244 else if (to_i915(dev)->ggtt.alias)
2245 args->value = to_i915(dev)->ggtt.alias->vm.total;
2247 args->value = to_i915(dev)->ggtt.vm.total;
2250 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2252 args->value = i915_gem_context_no_error_capture(ctx);
2255 case I915_CONTEXT_PARAM_BANNABLE:
2257 args->value = i915_gem_context_is_bannable(ctx);
2260 case I915_CONTEXT_PARAM_RECOVERABLE:
2262 args->value = i915_gem_context_is_recoverable(ctx);
2265 case I915_CONTEXT_PARAM_PRIORITY:
2267 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2270 case I915_CONTEXT_PARAM_SSEU:
2271 ret = get_sseu(ctx, args);
2274 case I915_CONTEXT_PARAM_VM:
2275 ret = get_ppgtt(file_priv, ctx, args);
2278 case I915_CONTEXT_PARAM_ENGINES:
2279 ret = get_engines(ctx, args);
2282 case I915_CONTEXT_PARAM_BAN_PERIOD:
2288 i915_gem_context_put(ctx);
2292 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2293 struct drm_file *file)
2295 struct drm_i915_file_private *file_priv = file->driver_priv;
2296 struct drm_i915_gem_context_param *args = data;
2297 struct i915_gem_context *ctx;
2300 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2304 ret = ctx_setparam(file_priv, ctx, args);
2306 i915_gem_context_put(ctx);
2310 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2311 void *data, struct drm_file *file)
2313 struct drm_i915_private *dev_priv = to_i915(dev);
2314 struct drm_i915_reset_stats *args = data;
2315 struct i915_gem_context *ctx;
2318 if (args->flags || args->pad)
2323 ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2328 * We opt for unserialised reads here. This may result in tearing
2329 * in the extremely unlikely event of a GPU hang on this context
2330 * as we are querying them. If we need that extra layer of protection,
2331 * we should wrap the hangstats with a seqlock.
2334 if (capable(CAP_SYS_ADMIN))
2335 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2337 args->reset_count = 0;
2339 args->batch_active = atomic_read(&ctx->guilty_count);
2340 args->batch_pending = atomic_read(&ctx->active_count);
2348 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2350 struct drm_i915_private *i915 = ctx->i915;
2353 mutex_lock(&i915->contexts.mutex);
2355 GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2357 if (list_empty(&ctx->hw_id_link)) {
2358 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2360 err = assign_hw_id(i915, &ctx->hw_id);
2364 list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2367 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2368 atomic_inc(&ctx->hw_id_pin_count);
2371 mutex_unlock(&i915->contexts.mutex);
2375 /* GEM context-engines iterator: for_each_gem_engine() */
2376 struct intel_context *
2377 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2379 const struct i915_gem_engines *e = it->engines;
2380 struct intel_context *ctx;
2383 if (it->idx >= e->num_engines)
2386 ctx = e->engines[it->idx++];
2392 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2393 #include "selftests/mock_context.c"
2394 #include "selftests/i915_gem_context.c"
2397 static void i915_global_gem_context_shrink(void)
2399 kmem_cache_shrink(global.slab_luts);
2402 static void i915_global_gem_context_exit(void)
2404 kmem_cache_destroy(global.slab_luts);
2407 static struct i915_global_gem_context global = { {
2408 .shrink = i915_global_gem_context_shrink,
2409 .exit = i915_global_gem_context_exit,
2412 int __init i915_global_gem_context_init(void)
2414 global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2415 if (!global.slab_luts)
2418 i915_global_register(&global.base);