2 * SPDX-License-Identifier: MIT
4 * Copyright © 2011-2012 Intel Corporation
8 * This file implements HW context support. On gen5+ a HW context consists of an
9 * opaque GPU object which is referenced at times of context saves and restores.
10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
11 * RC6 (the GPU has its own internal power context, except on gen5). Though
12 * something like a context does exist for the media ring, the code only
13 * supports contexts for the render ring.
15 * In software, there is a distinction between contexts created by the user,
16 * and the default HW context. The default HW context is used by GPU clients
17 * that do not request setup of their own hardware context. The default
18 * context's state is never restored to help prevent programming errors. This
19 * would happen if a client ran and piggy-backed off another client's GPU state.
20 * The default context only exists to give the GPU some offset to load as the
21 * current context to invoke a save of the context we actually care about. In fact, the
22 * code could likely be constructed, albeit in a more complicated fashion, to
23 * never use the default context, though that limits the driver's ability to
24 * swap out and/or destroy other contexts.
26 * All other contexts are created at the request of a GPU client. These contexts
27 * store GPU state, and thus allow GPU clients to not re-emit state (and
28 * potentially query certain state) at any time. The kernel driver makes
29 * certain that the appropriate commands are inserted.
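 *
 * For illustration, a GPU client typically obtains such a context through the
 * CONTEXT_CREATE/CONTEXT_DESTROY uAPI (hedged userspace sketch, not part of
 * this file, using the ioctls from include/uapi/drm/i915_drm.h; "fd" is an
 * already opened DRM file descriptor and is assumed here):
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *
 * create.ctx_id is what userspace then passes in execbuffer to select this
 * context.
 *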
31 * The context life cycle is semi-complicated in that context BOs may live
32 * longer than the context itself because of the way the hardware and object
33 * tracking work. Below is a very crude representation of the state machine
34 * describing the context life.
35 *                                            refcount  pincount  active
36 * S0: initial state                             0         0        0
37 * S1: context created                           1         0        0
38 * S2: context is currently running              2         1        X
39 * S3: GPU referenced, but not current           2         0        1
40 * S4: context is current, but destroyed         1         1        0
41 * S5: like S3, but destroyed                    1         0        1
43 * The most common (but not all) transitions:
44 * S0->S1: client creates a context
45 * S1->S2: client submits execbuf with context
46 * S2->S3: another client submits an execbuf with a different context
47 * S3->S1: context object was retired
48 * S3->S2: client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
53 * There are two confusing terms used above:
54 * The "current context" means the context which is currently running on the
55 * GPU. The GPU has loaded its state already and has stored away the gtt
56 * offset of the BO. The GPU is not actively referencing the data at this
57 * offset, but it will on the next context switch. The only way to avoid this
58 * is to do a GPU reset.
60 * An "active context" is one which was previously the "current context" and is
61 * on the active list waiting for the next context switch to occur. Until this
62 * happens, the object must remain at the same gtt offset. It is therefore
63 * possible for a context to be destroyed while it is still active.
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
70 #include <drm/i915_drm.h>
72 #include "gt/intel_lrc_reg.h"
74 #include "i915_gem_context.h"
75 #include "i915_globals.h"
76 #include "i915_trace.h"
77 #include "i915_user_extensions.h"
79 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
81 static struct i915_global_gem_context {
82 struct i915_global base;
83 struct kmem_cache *slab_luts;
86 struct i915_lut_handle *i915_lut_handle_alloc(void)
88 return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
91 void i915_lut_handle_free(struct i915_lut_handle *lut)
93 kmem_cache_free(global.slab_luts, lut);
96 static void lut_close(struct i915_gem_context *ctx)
98 struct i915_lut_handle *lut, *ln;
99 struct radix_tree_iter iter;
102 list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
103 list_del(&lut->obj_link);
104 i915_lut_handle_free(lut);
106 INIT_LIST_HEAD(&ctx->handles_list);
109 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
110 struct i915_vma *vma = rcu_dereference_raw(*slot);
112 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
120 static struct intel_context *
121 lookup_user_engine(struct i915_gem_context *ctx,
123 const struct i915_engine_class_instance *ci)
124 #define LOOKUP_USER_INDEX BIT(0)
128 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
129 return ERR_PTR(-EINVAL);
131 if (!i915_gem_context_user_engines(ctx)) {
132 struct intel_engine_cs *engine;
134 engine = intel_engine_lookup_user(ctx->i915,
136 ci->engine_instance);
138 return ERR_PTR(-EINVAL);
142 idx = ci->engine_instance;
145 return i915_gem_context_get_engine(ctx, idx);
148 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
152 lockdep_assert_held(&i915->contexts.mutex);
154 if (INTEL_GEN(i915) >= 11)
155 max = GEN11_MAX_CONTEXT_HW_ID;
156 else if (USES_GUC_SUBMISSION(i915))
158 * When using GuC in proxy submission, GuC consumes the
159 * highest bit in the context id to indicate proxy submission.
161 max = MAX_GUC_CONTEXT_HW_ID;
163 max = MAX_CONTEXT_HW_ID;
165 return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
168 static int steal_hw_id(struct drm_i915_private *i915)
170 struct i915_gem_context *ctx, *cn;
174 lockdep_assert_held(&i915->contexts.mutex);
176 list_for_each_entry_safe(ctx, cn,
177 &i915->contexts.hw_id_list, hw_id_link) {
178 if (atomic_read(&ctx->hw_id_pin_count)) {
179 list_move_tail(&ctx->hw_id_link, &pinned);
183 GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
184 list_del_init(&ctx->hw_id_link);
190 * Remember how far we got on the last repossession scan, so the
191 * list is kept in a "least recently scanned" order.
193 list_splice_tail(&pinned, &i915->contexts.hw_id_list);
197 static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
201 lockdep_assert_held(&i915->contexts.mutex);
204 * We prefer to steal/stall ourselves and our users over that of the
205 * entire system. That may be a little unfair to our users, and
206 * even hurt high priority clients. The choice is whether to oomkill
207 * something else, or steal a context id.
209 ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
210 if (unlikely(ret < 0)) {
211 ret = steal_hw_id(i915);
212 if (ret < 0) /* once again for the correct errno code */
213 ret = new_hw_id(i915, GFP_KERNEL);
222 static void release_hw_id(struct i915_gem_context *ctx)
224 struct drm_i915_private *i915 = ctx->i915;
226 if (list_empty(&ctx->hw_id_link))
229 mutex_lock(&i915->contexts.mutex);
230 if (!list_empty(&ctx->hw_id_link)) {
231 ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
232 list_del_init(&ctx->hw_id_link);
234 mutex_unlock(&i915->contexts.mutex);
237 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
240 if (!e->engines[count])
243 intel_context_put(e->engines[count]);
248 static void free_engines(struct i915_gem_engines *e)
250 __free_engines(e, e->num_engines);
253 static void free_engines_rcu(struct work_struct *wrk)
255 struct i915_gem_engines *e =
256 container_of(wrk, struct i915_gem_engines, rcu.work);
257 struct drm_i915_private *i915 = e->i915;
259 mutex_lock(&i915->drm.struct_mutex);
261 mutex_unlock(&i915->drm.struct_mutex);
264 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
266 struct intel_engine_cs *engine;
267 struct i915_gem_engines *e;
268 enum intel_engine_id id;
270 e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
272 return ERR_PTR(-ENOMEM);
275 for_each_engine(engine, ctx->i915, id) {
276 struct intel_context *ce;
278 ce = intel_context_create(ctx, engine);
280 __free_engines(e, id);
291 static void i915_gem_context_free(struct i915_gem_context *ctx)
293 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
294 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
297 i915_ppgtt_put(ctx->ppgtt);
299 free_engines(rcu_access_pointer(ctx->engines));
300 mutex_destroy(&ctx->engines_mutex);
303 i915_timeline_put(ctx->timeline);
308 list_del(&ctx->link);
309 mutex_destroy(&ctx->mutex);
314 static void contexts_free(struct drm_i915_private *i915)
316 struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
317 struct i915_gem_context *ctx, *cn;
319 lockdep_assert_held(&i915->drm.struct_mutex);
321 llist_for_each_entry_safe(ctx, cn, freed, free_link)
322 i915_gem_context_free(ctx);
325 static void contexts_free_first(struct drm_i915_private *i915)
327 struct i915_gem_context *ctx;
328 struct llist_node *freed;
330 lockdep_assert_held(&i915->drm.struct_mutex);
332 freed = llist_del_first(&i915->contexts.free_list);
336 ctx = container_of(freed, typeof(*ctx), free_link);
337 i915_gem_context_free(ctx);
340 static void contexts_free_worker(struct work_struct *work)
342 struct drm_i915_private *i915 =
343 container_of(work, typeof(*i915), contexts.free_work);
345 mutex_lock(&i915->drm.struct_mutex);
347 mutex_unlock(&i915->drm.struct_mutex);
350 void i915_gem_context_release(struct kref *ref)
352 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
353 struct drm_i915_private *i915 = ctx->i915;
355 trace_i915_context_free(ctx);
356 if (llist_add(&ctx->free_link, &i915->contexts.free_list))
357 queue_work(i915->wq, &i915->contexts.free_work);
360 static void context_close(struct i915_gem_context *ctx)
362 i915_gem_context_set_closed(ctx);
365 * This context will never again be assigned to HW, so we can
366 * reuse its ID for the next context.
371 * The LUT uses the VMA as a backpointer to unref the object,
372 * so we need to clear the LUT before we close all the VMA (inside
377 ctx->file_priv = ERR_PTR(-EBADF);
378 i915_gem_context_put(ctx);
381 static u32 default_desc_template(const struct drm_i915_private *i915,
382 const struct i915_hw_ppgtt *ppgtt)
387 desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
389 address_mode = INTEL_LEGACY_32B_CONTEXT;
390 if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
391 address_mode = INTEL_LEGACY_64B_CONTEXT;
392 desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
395 desc |= GEN8_CTX_L3LLC_COHERENT;
397 /* TODO: WaDisableLiteRestore when we start using semaphore
398 * signalling between Command Streamers
399 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
405 static struct i915_gem_context *
406 __create_context(struct drm_i915_private *dev_priv)
408 struct i915_gem_context *ctx;
409 struct i915_gem_engines *e;
413 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
415 return ERR_PTR(-ENOMEM);
417 kref_init(&ctx->ref);
418 list_add_tail(&ctx->link, &dev_priv->contexts.list);
419 ctx->i915 = dev_priv;
420 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
421 mutex_init(&ctx->mutex);
423 mutex_init(&ctx->engines_mutex);
424 e = default_engines(ctx);
429 RCU_INIT_POINTER(ctx->engines, e);
431 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
432 INIT_LIST_HEAD(&ctx->handles_list);
433 INIT_LIST_HEAD(&ctx->hw_id_link);
435 /* NB: Mark all slices as needing a remap so that when the context first
436 * loads it will restore whatever remap state already exists. If there
437 * is no remap info, it will be a NOP. */
438 ctx->remap_slice = ALL_L3_SLICES(dev_priv);
440 i915_gem_context_set_bannable(ctx);
441 i915_gem_context_set_recoverable(ctx);
443 ctx->ring_size = 4 * PAGE_SIZE;
445 default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
447 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
448 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
457 static struct i915_hw_ppgtt *
458 __set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
460 struct i915_hw_ppgtt *old = ctx->ppgtt;
462 ctx->ppgtt = i915_ppgtt_get(ppgtt);
463 ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
468 static void __assign_ppgtt(struct i915_gem_context *ctx,
469 struct i915_hw_ppgtt *ppgtt)
471 if (ppgtt == ctx->ppgtt)
474 ppgtt = __set_ppgtt(ctx, ppgtt);
476 i915_ppgtt_put(ppgtt);
479 static struct i915_gem_context *
480 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
482 struct i915_gem_context *ctx;
484 lockdep_assert_held(&dev_priv->drm.struct_mutex);
486 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
487 !HAS_EXECLISTS(dev_priv))
488 return ERR_PTR(-EINVAL);
490 /* Reap the most stale context */
491 contexts_free_first(dev_priv);
493 ctx = __create_context(dev_priv);
497 if (HAS_FULL_PPGTT(dev_priv)) {
498 struct i915_hw_ppgtt *ppgtt;
500 ppgtt = i915_ppgtt_create(dev_priv);
502 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
505 return ERR_CAST(ppgtt);
508 __assign_ppgtt(ctx, ppgtt);
509 i915_ppgtt_put(ppgtt);
512 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
513 struct i915_timeline *timeline;
515 timeline = i915_timeline_create(dev_priv, NULL);
516 if (IS_ERR(timeline)) {
518 return ERR_CAST(timeline);
521 ctx->timeline = timeline;
524 trace_i915_context_create(ctx);
530 * i915_gem_context_create_gvt - create a GVT GEM context
533 * This function is used to create a GVT specific GEM context.
536 * pointer to i915_gem_context on success, error pointer if failed
539 struct i915_gem_context *
540 i915_gem_context_create_gvt(struct drm_device *dev)
542 struct i915_gem_context *ctx;
545 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
546 return ERR_PTR(-ENODEV);
548 ret = i915_mutex_lock_interruptible(dev);
552 ctx = i915_gem_create_context(to_i915(dev), 0);
556 ret = i915_gem_context_pin_hw_id(ctx);
563 ctx->file_priv = ERR_PTR(-EBADF);
564 i915_gem_context_set_closed(ctx); /* not user accessible */
565 i915_gem_context_clear_bannable(ctx);
566 i915_gem_context_set_force_single_submission(ctx);
567 if (!USES_GUC_SUBMISSION(to_i915(dev)))
568 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
570 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
572 mutex_unlock(&dev->struct_mutex);
577 destroy_kernel_context(struct i915_gem_context **ctxp)
579 struct i915_gem_context *ctx;
581 /* Keep the context ref so that we can free it immediately ourselves */
582 ctx = i915_gem_context_get(fetch_and_zero(ctxp));
583 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
586 i915_gem_context_free(ctx);
589 struct i915_gem_context *
590 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
592 struct i915_gem_context *ctx;
595 ctx = i915_gem_create_context(i915, 0);
599 err = i915_gem_context_pin_hw_id(ctx);
601 destroy_kernel_context(&ctx);
605 i915_gem_context_clear_bannable(ctx);
606 ctx->sched.priority = I915_USER_PRIORITY(prio);
607 ctx->ring_size = PAGE_SIZE;
609 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
614 static void init_contexts(struct drm_i915_private *i915)
616 mutex_init(&i915->contexts.mutex);
617 INIT_LIST_HEAD(&i915->contexts.list);
619 /* Using the simple ida interface, the max is limited by sizeof(int) */
620 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
621 BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
622 ida_init(&i915->contexts.hw_ida);
623 INIT_LIST_HEAD(&i915->contexts.hw_id_list);
625 INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
626 init_llist_head(&i915->contexts.free_list);
629 static bool needs_preempt_context(struct drm_i915_private *i915)
631 return HAS_EXECLISTS(i915);
634 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
636 struct i915_gem_context *ctx;
638 /* Reassure ourselves we are only called once */
639 GEM_BUG_ON(dev_priv->kernel_context);
640 GEM_BUG_ON(dev_priv->preempt_context);
642 intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
643 init_contexts(dev_priv);
645 /* lowest priority; idle task */
646 ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
648 DRM_ERROR("Failed to create default global context\n");
652 * For easy recognisability, we want the kernel context to be 0 and then
653 * all user contexts will have non-zero hw_id. Kernel contexts are
654 * permanently pinned, so that we never suffer a stall and can
655 * use them from any allocation context (e.g. for evicting other
656 * contexts and from inside the shrinker).
658 GEM_BUG_ON(ctx->hw_id);
659 GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
660 dev_priv->kernel_context = ctx;
662 /* highest priority; preempting task */
663 if (needs_preempt_context(dev_priv)) {
664 ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
666 dev_priv->preempt_context = ctx;
668 DRM_ERROR("Failed to create preempt context; disabling preemption\n");
671 DRM_DEBUG_DRIVER("%s context support initialized\n",
672 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
677 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
679 struct intel_engine_cs *engine;
680 enum intel_engine_id id;
682 lockdep_assert_held(&dev_priv->drm.struct_mutex);
684 for_each_engine(engine, dev_priv, id)
685 intel_engine_lost_context(engine);
688 void i915_gem_contexts_fini(struct drm_i915_private *i915)
690 lockdep_assert_held(&i915->drm.struct_mutex);
692 if (i915->preempt_context)
693 destroy_kernel_context(&i915->preempt_context);
694 destroy_kernel_context(&i915->kernel_context);
696 /* Must free all deferred contexts (via flush_workqueue) first */
697 GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
698 ida_destroy(&i915->contexts.hw_ida);
701 static int context_idr_cleanup(int id, void *p, void *data)
707 static int vm_idr_cleanup(int id, void *p, void *data)
713 static int gem_context_register(struct i915_gem_context *ctx,
714 struct drm_i915_file_private *fpriv)
718 ctx->file_priv = fpriv;
720 ctx->ppgtt->vm.file = fpriv;
722 ctx->pid = get_task_pid(current, PIDTYPE_PID);
723 ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
724 current->comm, pid_nr(ctx->pid));
730 /* And finally expose ourselves to userspace via the idr */
731 mutex_lock(&fpriv->context_idr_lock);
732 ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
733 mutex_unlock(&fpriv->context_idr_lock);
737 kfree(fetch_and_zero(&ctx->name));
739 put_pid(fetch_and_zero(&ctx->pid));
744 int i915_gem_context_open(struct drm_i915_private *i915,
745 struct drm_file *file)
747 struct drm_i915_file_private *file_priv = file->driver_priv;
748 struct i915_gem_context *ctx;
751 mutex_init(&file_priv->context_idr_lock);
752 mutex_init(&file_priv->vm_idr_lock);
754 idr_init(&file_priv->context_idr);
755 idr_init_base(&file_priv->vm_idr, 1);
757 mutex_lock(&i915->drm.struct_mutex);
758 ctx = i915_gem_create_context(i915, 0);
759 mutex_unlock(&i915->drm.struct_mutex);
765 err = gem_context_register(ctx, file_priv);
769 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
775 mutex_lock(&i915->drm.struct_mutex);
777 mutex_unlock(&i915->drm.struct_mutex);
779 idr_destroy(&file_priv->vm_idr);
780 idr_destroy(&file_priv->context_idr);
781 mutex_destroy(&file_priv->vm_idr_lock);
782 mutex_destroy(&file_priv->context_idr_lock);
786 void i915_gem_context_close(struct drm_file *file)
788 struct drm_i915_file_private *file_priv = file->driver_priv;
790 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
792 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
793 idr_destroy(&file_priv->context_idr);
794 mutex_destroy(&file_priv->context_idr_lock);
796 idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
797 idr_destroy(&file_priv->vm_idr);
798 mutex_destroy(&file_priv->vm_idr_lock);
801 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
802 struct drm_file *file)
804 struct drm_i915_private *i915 = to_i915(dev);
805 struct drm_i915_gem_vm_control *args = data;
806 struct drm_i915_file_private *file_priv = file->driver_priv;
807 struct i915_hw_ppgtt *ppgtt;
810 if (!HAS_FULL_PPGTT(i915))
816 ppgtt = i915_ppgtt_create(i915);
818 return PTR_ERR(ppgtt);
820 ppgtt->vm.file = file_priv;
822 if (args->extensions) {
823 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
830 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
834 err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
838 GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
840 mutex_unlock(&file_priv->vm_idr_lock);
846 mutex_unlock(&file_priv->vm_idr_lock);
848 i915_ppgtt_put(ppgtt);
852 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
853 struct drm_file *file)
855 struct drm_i915_file_private *file_priv = file->driver_priv;
856 struct drm_i915_gem_vm_control *args = data;
857 struct i915_hw_ppgtt *ppgtt;
864 if (args->extensions)
871 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
875 ppgtt = idr_remove(&file_priv->vm_idr, id);
877 mutex_unlock(&file_priv->vm_idr_lock);
881 i915_ppgtt_put(ppgtt);
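
/*
 * Illustrative userspace sketch (not part of this file) for the VM uAPI
 * handled above: create an address space, attach it to an existing context
 * via I915_CONTEXT_PARAM_VM, then drop our own handle to it. "fd" and
 * "ctx_id" are assumed to come from an open DRM device and a prior
 * CONTEXT_CREATE call.
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *	struct drm_i915_gem_context_param arg = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *
 *	arg.ctx_id = ctx_id;
 *	arg.param = I915_CONTEXT_PARAM_VM;
 *	arg.value = vm.vm_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
 *
 * The context keeps its own reference on the ppgtt, so destroying the vm_id
 * handle afterwards does not tear down the address space under it.
 */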
885 struct context_barrier_task {
886 struct i915_active base;
887 void (*task)(void *data);
891 static void cb_retire(struct i915_active *base)
893 struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
898 i915_active_fini(&cb->base);
902 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
903 static int context_barrier_task(struct i915_gem_context *ctx,
904 intel_engine_mask_t engines,
905 int (*emit)(struct i915_request *rq, void *data),
906 void (*task)(void *data),
909 struct drm_i915_private *i915 = ctx->i915;
910 struct context_barrier_task *cb;
911 struct i915_gem_engines_iter it;
912 struct intel_context *ce;
915 lockdep_assert_held(&i915->drm.struct_mutex);
918 cb = kmalloc(sizeof(*cb), GFP_KERNEL);
922 i915_active_init(i915, &cb->base, cb_retire);
923 i915_active_acquire(&cb->base);
925 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
926 struct i915_request *rq;
928 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
934 if (!(ce->engine->mask & engines) || !ce->state)
937 rq = intel_context_create_request(ce);
945 err = emit(rq, data);
947 err = i915_active_ref(&cb->base, rq->fence.context, rq);
949 i915_request_add(rq);
953 i915_gem_context_unlock_engines(ctx);
955 cb->task = err ? NULL : task; /* caller needs to unwind instead */
958 i915_active_release(&cb->base);
963 static int get_ppgtt(struct drm_i915_file_private *file_priv,
964 struct i915_gem_context *ctx,
965 struct drm_i915_gem_context_param *args)
967 struct i915_hw_ppgtt *ppgtt;
973 /* XXX rcu acquire? */
974 ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
978 ppgtt = i915_ppgtt_get(ctx->ppgtt);
979 mutex_unlock(&ctx->i915->drm.struct_mutex);
981 ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
985 ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
990 i915_ppgtt_get(ppgtt);
997 mutex_unlock(&file_priv->vm_idr_lock);
999 i915_ppgtt_put(ppgtt);
1003 static void set_ppgtt_barrier(void *data)
1005 struct i915_hw_ppgtt *old = data;
1007 if (INTEL_GEN(old->vm.i915) < 8)
1008 gen6_ppgtt_unpin_all(old);
1010 i915_ppgtt_put(old);
1013 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1015 struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
1016 struct intel_engine_cs *engine = rq->engine;
1017 u32 base = engine->mmio_base;
1021 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1022 const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
1024 cs = intel_ring_begin(rq, 6);
1028 *cs++ = MI_LOAD_REGISTER_IMM(2);
1030 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1031 *cs++ = upper_32_bits(pd_daddr);
1032 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1033 *cs++ = lower_32_bits(pd_daddr);
1036 intel_ring_advance(rq, cs);
1037 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1038 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1042 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1043 for (i = GEN8_3LVL_PDPES; i--; ) {
1044 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1046 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1047 *cs++ = upper_32_bits(pd_daddr);
1048 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1049 *cs++ = lower_32_bits(pd_daddr);
1052 intel_ring_advance(rq, cs);
1054 /* ppGTT is not part of the legacy context image */
1055 gen6_ppgtt_pin(ppgtt);
1061 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1062 struct i915_gem_context *ctx,
1063 struct drm_i915_gem_context_param *args)
1065 struct i915_hw_ppgtt *ppgtt, *old;
1074 if (upper_32_bits(args->value))
1077 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1081 ppgtt = idr_find(&file_priv->vm_idr, args->value);
1083 i915_ppgtt_get(ppgtt);
1084 mutex_unlock(&file_priv->vm_idr_lock);
1088 err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1092 if (ppgtt == ctx->ppgtt)
1095 /* Tear down the existing obj:vma cache; it will have to be rebuilt. */
1098 old = __set_ppgtt(ctx, ppgtt);
1101 * We need to flush any requests using the current ppgtt before
1102 * we release it as the requests do not hold a reference themselves,
1103 * only indirectly through the context.
1105 err = context_barrier_task(ctx, ALL_ENGINES,
1111 ctx->desc_template = default_desc_template(ctx->i915, old);
1112 i915_ppgtt_put(ppgtt);
1116 mutex_unlock(&ctx->i915->drm.struct_mutex);
1119 i915_ppgtt_put(ppgtt);
1123 static int gen8_emit_rpcs_config(struct i915_request *rq,
1124 struct intel_context *ce,
1125 struct intel_sseu sseu)
1130 cs = intel_ring_begin(rq, 4);
1134 offset = i915_ggtt_offset(ce->state) +
1135 LRC_STATE_PN * PAGE_SIZE +
1136 (CTX_R_PWR_CLK_STATE + 1) * 4;
1138 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1139 *cs++ = lower_32_bits(offset);
1140 *cs++ = upper_32_bits(offset);
1141 *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1143 intel_ring_advance(rq, cs);
1149 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1151 struct i915_request *rq;
1154 lockdep_assert_held(&ce->pin_mutex);
1157 * If the context is not idle, we have to submit an ordered request to
1158 * modify its context image via the kernel context (writing to our own
1159 * image, or into the registers directly, does not stick). Pristine
1160 * and idle contexts will be configured on pinning.
1162 if (!intel_context_is_pinned(ce))
1165 rq = i915_request_create(ce->engine->kernel_context);
1169 /* Queue this switch after all other activity by this context. */
1170 ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
1174 ret = gen8_emit_rpcs_config(rq, ce, sseu);
1179 * Guarantee that the context image and the timeline remain pinned until the
1180 * modifying request is retired, by setting the ce activity tracker.
1182 * But we only need to take one pin on its account. Or in other
1183 * words, transfer the pinned ce object to the tracked active request.
1185 if (!i915_active_request_isset(&ce->active_tracker))
1186 __intel_context_pin(ce);
1187 __i915_active_request_set(&ce->active_tracker, rq);
1190 i915_request_add(rq);
1195 __intel_context_reconfigure_sseu(struct intel_context *ce,
1196 struct intel_sseu sseu)
1200 GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
1202 ret = intel_context_lock_pinned(ce);
1206 /* Nothing to do if unmodified. */
1207 if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1210 ret = gen8_modify_rpcs(ce, sseu);
1215 intel_context_unlock_pinned(ce);
1220 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1222 struct drm_i915_private *i915 = ce->gem_context->i915;
1225 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1229 ret = __intel_context_reconfigure_sseu(ce, sseu);
1231 mutex_unlock(&i915->drm.struct_mutex);
1237 user_to_context_sseu(struct drm_i915_private *i915,
1238 const struct drm_i915_gem_context_param_sseu *user,
1239 struct intel_sseu *context)
1241 const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1243 /* No zeros in any field. */
1244 if (!user->slice_mask || !user->subslice_mask ||
1245 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1249 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1253 * Some future proofing on the types since the uAPI is wider than the
1254 * current internal implementation.
1256 if (overflows_type(user->slice_mask, context->slice_mask) ||
1257 overflows_type(user->subslice_mask, context->subslice_mask) ||
1258 overflows_type(user->min_eus_per_subslice,
1259 context->min_eus_per_subslice) ||
1260 overflows_type(user->max_eus_per_subslice,
1261 context->max_eus_per_subslice))
1264 /* Check validity against hardware. */
1265 if (user->slice_mask & ~device->slice_mask)
1268 if (user->subslice_mask & ~device->subslice_mask[0])
1271 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1274 context->slice_mask = user->slice_mask;
1275 context->subslice_mask = user->subslice_mask;
1276 context->min_eus_per_subslice = user->min_eus_per_subslice;
1277 context->max_eus_per_subslice = user->max_eus_per_subslice;
1279 /* Part specific restrictions. */
1280 if (IS_GEN(i915, 11)) {
1281 unsigned int hw_s = hweight8(device->slice_mask);
1282 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1283 unsigned int req_s = hweight8(context->slice_mask);
1284 unsigned int req_ss = hweight8(context->subslice_mask);
1287 * Only full subslice enablement is possible if more than one
1288 * slice is turned on.
1290 if (req_s > 1 && req_ss != hw_ss_per_s)
1294 * If more than four (SScount bitfield limit) subslices are
1295 * requested then the number has to be even.
1297 if (req_ss > 4 && (req_ss & 1))
1301 * If only one slice is enabled and subslice count is below the
1302 * device full enablement, it must be at most half of all the
1303 * available subslices.
1305 if (req_s == 1 && req_ss < hw_ss_per_s &&
1306 req_ss > (hw_ss_per_s / 2))
1309 /* ABI restriction - VME use case only. */
1311 /* All slices or one slice only. */
1312 if (req_s != 1 && req_s != hw_s)
1316 * Half subslices or full enablement only when one slice is
1320 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1323 /* No EU configuration changes. */
1324 if ((user->min_eus_per_subslice !=
1325 device->max_eus_per_subslice) ||
1326 (user->max_eus_per_subslice !=
1327 device->max_eus_per_subslice))
1334 static int set_sseu(struct i915_gem_context *ctx,
1335 struct drm_i915_gem_context_param *args)
1337 struct drm_i915_private *i915 = ctx->i915;
1338 struct drm_i915_gem_context_param_sseu user_sseu;
1339 struct intel_context *ce;
1340 struct intel_sseu sseu;
1341 unsigned long lookup;
1344 if (args->size < sizeof(user_sseu))
1347 if (!IS_GEN(i915, 11))
1350 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1357 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1361 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1362 lookup |= LOOKUP_USER_INDEX;
1364 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1368 /* Only render engine supports RPCS configuration. */
1369 if (ce->engine->class != RENDER_CLASS) {
1374 ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1378 ret = intel_context_reconfigure_sseu(ce, sseu);
1382 args->size = sizeof(user_sseu);
1385 intel_context_put(ce);
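
/*
 * Illustrative userspace sketch (not part of this file) for the SSEU uAPI
 * handled by set_sseu()/get_sseu(): reconfigure the render engine of a
 * context. "fd" and "ctx_id" are assumed; the masks below are examples only
 * and must satisfy the gen11 rules enforced in user_to_context_sseu().
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xff,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (__u64)(uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */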
1389 struct set_engines {
1390 struct i915_gem_context *ctx;
1391 struct i915_gem_engines *engines;
1395 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1397 struct i915_context_engines_load_balance __user *ext =
1398 container_of_user(base, typeof(*ext), base);
1399 const struct set_engines *set = data;
1400 struct intel_engine_cs *stack[16];
1401 struct intel_engine_cs **siblings;
1402 struct intel_context *ce;
1403 u16 num_siblings, idx;
1407 if (!HAS_EXECLISTS(set->ctx->i915))
1410 if (USES_GUC_SUBMISSION(set->ctx->i915))
1411 return -ENODEV; /* not implemented yet */
1413 if (get_user(idx, &ext->engine_index))
1416 if (idx >= set->engines->num_engines) {
1417 DRM_DEBUG("Invalid placement value, %d >= %d\n",
1418 idx, set->engines->num_engines);
1422 idx = array_index_nospec(idx, set->engines->num_engines);
1423 if (set->engines->engines[idx]) {
1424 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1428 if (get_user(num_siblings, &ext->num_siblings))
1431 err = check_user_mbz(&ext->flags);
1435 err = check_user_mbz(&ext->mbz64);
1440 if (num_siblings > ARRAY_SIZE(stack)) {
1441 siblings = kmalloc_array(num_siblings,
1448 for (n = 0; n < num_siblings; n++) {
1449 struct i915_engine_class_instance ci;
1451 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1456 siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1458 ci.engine_instance);
1460 DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1461 n, ci.engine_class, ci.engine_instance);
1467 ce = intel_execlists_create_virtual(set->ctx, siblings, n);
1473 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1474 intel_context_put(ce);
1480 if (siblings != stack)
1487 set_engines__bond(struct i915_user_extension __user *base, void *data)
1489 struct i915_context_engines_bond __user *ext =
1490 container_of_user(base, typeof(*ext), base);
1491 const struct set_engines *set = data;
1492 struct i915_engine_class_instance ci;
1493 struct intel_engine_cs *virtual;
1494 struct intel_engine_cs *master;
1498 if (get_user(idx, &ext->virtual_index))
1501 if (idx >= set->engines->num_engines) {
1502 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1503 idx, set->engines->num_engines);
1507 idx = array_index_nospec(idx, set->engines->num_engines);
1508 if (!set->engines->engines[idx]) {
1509 DRM_DEBUG("Invalid engine at %d\n", idx);
1512 virtual = set->engines->engines[idx]->engine;
1514 err = check_user_mbz(&ext->flags);
1518 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1519 err = check_user_mbz(&ext->mbz64[n]);
1524 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1527 master = intel_engine_lookup_user(set->ctx->i915,
1528 ci.engine_class, ci.engine_instance);
1530 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1531 ci.engine_class, ci.engine_instance);
1535 if (get_user(num_bonds, &ext->num_bonds))
1538 for (n = 0; n < num_bonds; n++) {
1539 struct intel_engine_cs *bond;
1541 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1544 bond = intel_engine_lookup_user(set->ctx->i915,
1546 ci.engine_instance);
1548 DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1549 n, ci.engine_class, ci.engine_instance);
1554 * A non-virtual engine has no siblings to choose between; and
1555 * a submit fence will always be directed to the one engine.
1557 if (intel_engine_is_virtual(virtual)) {
1558 err = intel_virtual_engine_attach_bond(virtual,
1569 static const i915_user_extension_fn set_engines__extensions[] = {
1570 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1571 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1575 set_engines(struct i915_gem_context *ctx,
1576 const struct drm_i915_gem_context_param *args)
1578 struct i915_context_param_engines __user *user =
1579 u64_to_user_ptr(args->value);
1580 struct set_engines set = { .ctx = ctx };
1581 unsigned int num_engines, n;
1585 if (!args->size) { /* switch back to legacy user_ring_map */
1586 if (!i915_gem_context_user_engines(ctx))
1589 set.engines = default_engines(ctx);
1590 if (IS_ERR(set.engines))
1591 return PTR_ERR(set.engines);
1596 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1597 if (args->size < sizeof(*user) ||
1598 !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1599 DRM_DEBUG("Invalid size for engine array: %d\n",
1605 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1606 * first 64 engines defined here.
1608 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1610 set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1615 set.engines->i915 = ctx->i915;
1616 for (n = 0; n < num_engines; n++) {
1617 struct i915_engine_class_instance ci;
1618 struct intel_engine_cs *engine;
1620 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1621 __free_engines(set.engines, n);
1625 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1626 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1627 set.engines->engines[n] = NULL;
1631 engine = intel_engine_lookup_user(ctx->i915,
1633 ci.engine_instance);
1635 DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1636 n, ci.engine_class, ci.engine_instance);
1637 __free_engines(set.engines, n);
1641 set.engines->engines[n] = intel_context_create(ctx, engine);
1642 if (!set.engines->engines[n]) {
1643 __free_engines(set.engines, n);
1647 set.engines->num_engines = num_engines;
1650 if (!get_user(extensions, &user->extensions))
1651 err = i915_user_extensions(u64_to_user_ptr(extensions),
1652 set_engines__extensions,
1653 ARRAY_SIZE(set_engines__extensions),
1656 free_engines(set.engines);
1661 mutex_lock(&ctx->engines_mutex);
1663 i915_gem_context_set_user_engines(ctx);
1665 i915_gem_context_clear_user_engines(ctx);
1666 rcu_swap_protected(ctx->engines, set.engines, 1);
1667 mutex_unlock(&ctx->engines_mutex);
1669 INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu);
1670 queue_rcu_work(system_wq, &set.engines->rcu);
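
/*
 * Illustrative userspace sketch (not part of this file) for the engine map
 * uAPI consumed by set_engines(): replace the default map with a two-entry
 * map of rcs0 and vcs0. "fd" and "ctx_id" are assumed.
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.engines = {
 *			{ .engine_class = I915_ENGINE_CLASS_RENDER,
 *			  .engine_instance = 0 },
 *			{ .engine_class = I915_ENGINE_CLASS_VIDEO,
 *			  .engine_instance = 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (__u64)(uintptr_t)&engines,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 *
 * Afterwards execbuffer indexes into this map instead of the legacy ring
 * selection, which is what i915_gem_context_user_engines() records.
 */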
1675 static struct i915_gem_engines *
1676 __copy_engines(struct i915_gem_engines *e)
1678 struct i915_gem_engines *copy;
1681 copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1683 return ERR_PTR(-ENOMEM);
1685 copy->i915 = e->i915;
1686 for (n = 0; n < e->num_engines; n++) {
1688 copy->engines[n] = intel_context_get(e->engines[n]);
1690 copy->engines[n] = NULL;
1692 copy->num_engines = n;
1698 get_engines(struct i915_gem_context *ctx,
1699 struct drm_i915_gem_context_param *args)
1701 struct i915_context_param_engines __user *user;
1702 struct i915_gem_engines *e;
1703 size_t n, count, size;
1706 err = mutex_lock_interruptible(&ctx->engines_mutex);
1711 if (i915_gem_context_user_engines(ctx))
1712 e = __copy_engines(i915_gem_context_engines(ctx));
1713 mutex_unlock(&ctx->engines_mutex);
1714 if (IS_ERR_OR_NULL(e)) {
1716 return PTR_ERR_OR_ZERO(e);
1719 count = e->num_engines;
1721 /* Be paranoid in case we have an impedance mismatch */
1722 if (!check_struct_size(user, engines, count, &size)) {
1726 if (overflows_type(size, args->size)) {
1736 if (args->size < size) {
1741 user = u64_to_user_ptr(args->value);
1742 if (!access_ok(user, size)) {
1747 if (put_user(0, &user->extensions)) {
1752 for (n = 0; n < count; n++) {
1753 struct i915_engine_class_instance ci = {
1754 .engine_class = I915_ENGINE_CLASS_INVALID,
1755 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1758 if (e->engines[n]) {
1759 ci.engine_class = e->engines[n]->engine->uabi_class;
1760 ci.engine_instance = e->engines[n]->engine->instance;
1763 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1772 INIT_RCU_WORK(&e->rcu, free_engines_rcu);
1773 queue_rcu_work(system_wq, &e->rcu);
1777 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1778 struct i915_gem_context *ctx,
1779 struct drm_i915_gem_context_param *args)
1783 switch (args->param) {
1784 case I915_CONTEXT_PARAM_NO_ZEROMAP:
1787 else if (args->value)
1788 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1790 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1793 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1796 else if (args->value)
1797 i915_gem_context_set_no_error_capture(ctx);
1799 i915_gem_context_clear_no_error_capture(ctx);
1802 case I915_CONTEXT_PARAM_BANNABLE:
1805 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1807 else if (args->value)
1808 i915_gem_context_set_bannable(ctx);
1810 i915_gem_context_clear_bannable(ctx);
1813 case I915_CONTEXT_PARAM_RECOVERABLE:
1816 else if (args->value)
1817 i915_gem_context_set_recoverable(ctx);
1819 i915_gem_context_clear_recoverable(ctx);
1822 case I915_CONTEXT_PARAM_PRIORITY:
1824 s64 priority = args->value;
1828 else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1830 else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1831 priority < I915_CONTEXT_MIN_USER_PRIORITY)
1833 else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1834 !capable(CAP_SYS_NICE))
1837 ctx->sched.priority =
1838 I915_USER_PRIORITY(priority);
1842 case I915_CONTEXT_PARAM_SSEU:
1843 ret = set_sseu(ctx, args);
1846 case I915_CONTEXT_PARAM_VM:
1847 ret = set_ppgtt(fpriv, ctx, args);
1850 case I915_CONTEXT_PARAM_ENGINES:
1851 ret = set_engines(ctx, args);
1854 case I915_CONTEXT_PARAM_BAN_PERIOD:
1864 struct i915_gem_context *ctx;
1865 struct drm_i915_file_private *fpriv;
1868 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1870 struct drm_i915_gem_context_create_ext_setparam local;
1871 const struct create_ext *arg = data;
1873 if (copy_from_user(&local, ext, sizeof(local)))
1876 if (local.param.ctx_id)
1879 return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1882 static int clone_engines(struct i915_gem_context *dst,
1883 struct i915_gem_context *src)
1885 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1886 struct i915_gem_engines *clone;
1890 clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1894 clone->i915 = dst->i915;
1895 for (n = 0; n < e->num_engines; n++) {
1896 struct intel_engine_cs *engine;
1898 if (!e->engines[n]) {
1899 clone->engines[n] = NULL;
1902 engine = e->engines[n]->engine;
1905 * Virtual engines are singletons; they can only exist
1906 * inside a single context, because they embed their
1907 * HW context... As each virtual context implies a single
1908 * timeline (each engine can only dequeue a single request
1909 * at any time), it would be surprising for two contexts
1910 * to use the same engine. So let's create a copy of
1911 * the virtual engine instead.
1913 if (intel_engine_is_virtual(engine))
1915 intel_execlists_clone_virtual(dst, engine);
1917 clone->engines[n] = intel_context_create(dst, engine);
1918 if (IS_ERR_OR_NULL(clone->engines[n])) {
1919 __free_engines(clone, n);
1923 clone->num_engines = n;
1925 user_engines = i915_gem_context_user_engines(src);
1926 i915_gem_context_unlock_engines(src);
1928 free_engines(dst->engines);
1929 RCU_INIT_POINTER(dst->engines, clone);
1931 i915_gem_context_set_user_engines(dst);
1933 i915_gem_context_clear_user_engines(dst);
1937 i915_gem_context_unlock_engines(src);
1941 static int clone_flags(struct i915_gem_context *dst,
1942 struct i915_gem_context *src)
1944 dst->user_flags = src->user_flags;
1948 static int clone_schedattr(struct i915_gem_context *dst,
1949 struct i915_gem_context *src)
1951 dst->sched = src->sched;
1955 static int clone_sseu(struct i915_gem_context *dst,
1956 struct i915_gem_context *src)
1958 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1959 struct i915_gem_engines *clone;
1963 clone = dst->engines; /* no locking required; sole access */
1964 if (e->num_engines != clone->num_engines) {
1969 for (n = 0; n < e->num_engines; n++) {
1970 struct intel_context *ce = e->engines[n];
1972 if (clone->engines[n]->engine->class != ce->engine->class) {
1973 /* Must have compatible engine maps! */
1978 /* serialises with set_sseu */
1979 err = intel_context_lock_pinned(ce);
1983 clone->engines[n]->sseu = ce->sseu;
1984 intel_context_unlock_pinned(ce);
1989 i915_gem_context_unlock_engines(src);
1993 static int clone_timeline(struct i915_gem_context *dst,
1994 struct i915_gem_context *src)
1996 if (src->timeline) {
1997 GEM_BUG_ON(src->timeline == dst->timeline);
2000 i915_timeline_put(dst->timeline);
2001 dst->timeline = i915_timeline_get(src->timeline);
2007 static int clone_vm(struct i915_gem_context *dst,
2008 struct i915_gem_context *src)
2010 struct i915_hw_ppgtt *ppgtt;
2014 ppgtt = READ_ONCE(src->ppgtt);
2018 if (!kref_get_unless_zero(&ppgtt->ref))
2022 * This ppgtt may have been reallocated between
2023 * the read and the kref, and reassigned to a third
2024 * context. In order to avoid inadvertent sharing
2025 * of this ppgtt with that third context (and not
2026 * src), we have to confirm that we have the same
2027 * ppgtt after passing through the strong memory
2028 * barrier implied by a successful
2029 * kref_get_unless_zero().
2031 * Once we have acquired the current ppgtt of src,
2032 * we no longer care if it is released from src, as
2033 * it cannot be reallocated elsewhere.
2036 if (ppgtt == READ_ONCE(src->ppgtt))
2039 i915_ppgtt_put(ppgtt);
2044 __assign_ppgtt(dst, ppgtt);
2045 i915_ppgtt_put(ppgtt);
2051 static int create_clone(struct i915_user_extension __user *ext, void *data)
2053 static int (* const fn[])(struct i915_gem_context *dst,
2054 struct i915_gem_context *src) = {
2055 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2056 MAP(ENGINES, clone_engines),
2057 MAP(FLAGS, clone_flags),
2058 MAP(SCHEDATTR, clone_schedattr),
2059 MAP(SSEU, clone_sseu),
2060 MAP(TIMELINE, clone_timeline),
2064 struct drm_i915_gem_context_create_ext_clone local;
2065 const struct create_ext *arg = data;
2066 struct i915_gem_context *dst = arg->ctx;
2067 struct i915_gem_context *src;
2070 if (copy_from_user(&local, ext, sizeof(local)))
2073 BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2074 I915_CONTEXT_CLONE_UNKNOWN);
2076 if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2083 src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2088 GEM_BUG_ON(src == dst);
2090 for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2091 if (!(local.flags & BIT(bit)))
2094 err = fn[bit](dst, src);
2102 static const i915_user_extension_fn create_extensions[] = {
2103 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2104 [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2107 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2109 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2112 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2113 struct drm_file *file)
2115 struct drm_i915_private *i915 = to_i915(dev);
2116 struct drm_i915_gem_context_create_ext *args = data;
2117 struct create_ext ext_data;
2120 if (!DRIVER_CAPS(i915)->has_logical_contexts)
2123 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2126 ret = i915_terminally_wedged(i915);
2130 ext_data.fpriv = file->driver_priv;
2131 if (client_is_banned(ext_data.fpriv)) {
2132 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2134 pid_nr(get_task_pid(current, PIDTYPE_PID)));
2138 ret = i915_mutex_lock_interruptible(dev);
2142 ext_data.ctx = i915_gem_create_context(i915, args->flags);
2143 mutex_unlock(&dev->struct_mutex);
2144 if (IS_ERR(ext_data.ctx))
2145 return PTR_ERR(ext_data.ctx);
2147 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2148 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2150 ARRAY_SIZE(create_extensions),
2156 ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
2161 DRM_DEBUG("HW context %d created\n", args->ctx_id);
2166 mutex_lock(&dev->struct_mutex);
2167 context_close(ext_data.ctx);
2168 mutex_unlock(&dev->struct_mutex);
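
/*
 * Illustrative userspace sketch (not part of this file) for the extended
 * create path above: create a context and set a parameter in one call by
 * chaining a SETPARAM extension. "fd" is assumed; lowering the priority as
 * done here needs no extra capability.
 *
 *	struct drm_i915_gem_context_create_ext_setparam p_prio = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = -512,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)&p_prio,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 */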
2172 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2173 struct drm_file *file)
2175 struct drm_i915_gem_context_destroy *args = data;
2176 struct drm_i915_file_private *file_priv = file->driver_priv;
2177 struct i915_gem_context *ctx;
2185 if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2188 ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2189 mutex_unlock(&file_priv->context_idr_lock);
2193 mutex_lock(&dev->struct_mutex);
2195 mutex_unlock(&dev->struct_mutex);
2200 static int get_sseu(struct i915_gem_context *ctx,
2201 struct drm_i915_gem_context_param *args)
2203 struct drm_i915_gem_context_param_sseu user_sseu;
2204 struct intel_context *ce;
2205 unsigned long lookup;
2208 if (args->size == 0)
2210 else if (args->size < sizeof(user_sseu))
2213 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2220 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2224 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2225 lookup |= LOOKUP_USER_INDEX;
2227 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2231 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2233 intel_context_put(ce);
2237 user_sseu.slice_mask = ce->sseu.slice_mask;
2238 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2239 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2240 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2242 intel_context_unlock_pinned(ce);
2243 intel_context_put(ce);
2245 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2250 args->size = sizeof(user_sseu);
2255 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2256 struct drm_file *file)
2258 struct drm_i915_file_private *file_priv = file->driver_priv;
2259 struct drm_i915_gem_context_param *args = data;
2260 struct i915_gem_context *ctx;
2263 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2267 switch (args->param) {
2268 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2270 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2273 case I915_CONTEXT_PARAM_GTT_SIZE:
2276 args->value = ctx->ppgtt->vm.total;
2277 else if (to_i915(dev)->mm.aliasing_ppgtt)
2278 args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
2280 args->value = to_i915(dev)->ggtt.vm.total;
2283 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2285 args->value = i915_gem_context_no_error_capture(ctx);
2288 case I915_CONTEXT_PARAM_BANNABLE:
2290 args->value = i915_gem_context_is_bannable(ctx);
2293 case I915_CONTEXT_PARAM_RECOVERABLE:
2295 args->value = i915_gem_context_is_recoverable(ctx);
2298 case I915_CONTEXT_PARAM_PRIORITY:
2300 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2303 case I915_CONTEXT_PARAM_SSEU:
2304 ret = get_sseu(ctx, args);
2307 case I915_CONTEXT_PARAM_VM:
2308 ret = get_ppgtt(file_priv, ctx, args);
2311 case I915_CONTEXT_PARAM_ENGINES:
2312 ret = get_engines(ctx, args);
2315 case I915_CONTEXT_PARAM_BAN_PERIOD:
2321 i915_gem_context_put(ctx);
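
/*
 * Illustrative userspace sketch (not part of this file) for the getparam
 * ioctl above: query the size of the address space seen by a context.
 * ctx_id 0 names the default context of this file; "fd" is assumed.
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = 0,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *
 * On return arg.value holds the total size of the ppgtt (or of the aliasing
 * ppgtt/ggtt fallbacks handled above).
 */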
2325 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2326 struct drm_file *file)
2328 struct drm_i915_file_private *file_priv = file->driver_priv;
2329 struct drm_i915_gem_context_param *args = data;
2330 struct i915_gem_context *ctx;
2333 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2337 ret = ctx_setparam(file_priv, ctx, args);
2339 i915_gem_context_put(ctx);
2343 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2344 void *data, struct drm_file *file)
2346 struct drm_i915_private *dev_priv = to_i915(dev);
2347 struct drm_i915_reset_stats *args = data;
2348 struct i915_gem_context *ctx;
2351 if (args->flags || args->pad)
2356 ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2361 * We opt for unserialised reads here. This may result in tearing
2362 * in the extremely unlikely event of a GPU hang on this context
2363 * as we are querying them. If we need that extra layer of protection,
2364 * we should wrap the hangstats with a seqlock.
2367 if (capable(CAP_SYS_ADMIN))
2368 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2370 args->reset_count = 0;
2372 args->batch_active = atomic_read(&ctx->guilty_count);
2373 args->batch_pending = atomic_read(&ctx->active_count);
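
/*
 * Illustrative userspace sketch (not part of this file) for the reset stats
 * query above, as consumed by e.g. GL robustness extensions. "fd" and
 * "ctx_id" are assumed.
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *
 * stats.batch_active counts hangs where this context was guilty, while
 * stats.batch_pending counts hangs this context was merely affected by.
 */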
2381 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2383 struct drm_i915_private *i915 = ctx->i915;
2386 mutex_lock(&i915->contexts.mutex);
2388 GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2390 if (list_empty(&ctx->hw_id_link)) {
2391 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2393 err = assign_hw_id(i915, &ctx->hw_id);
2397 list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2400 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2401 atomic_inc(&ctx->hw_id_pin_count);
2404 mutex_unlock(&i915->contexts.mutex);
2408 /* GEM context-engines iterator: for_each_gem_engine() */
2409 struct intel_context *
2410 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2412 const struct i915_gem_engines *e = it->engines;
2413 struct intel_context *ctx;
2416 if (it->idx >= e->num_engines)
2419 ctx = e->engines[it->idx++];
2425 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2426 #include "selftests/mock_context.c"
2427 #include "selftests/i915_gem_context.c"
2430 static void i915_global_gem_context_shrink(void)
2432 kmem_cache_shrink(global.slab_luts);
2435 static void i915_global_gem_context_exit(void)
2437 kmem_cache_destroy(global.slab_luts);
2440 static struct i915_global_gem_context global = { {
2441 .shrink = i915_global_gem_context_shrink,
2442 .exit = i915_global_gem_context_exit,
2445 int __init i915_global_gem_context_init(void)
2447 global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2448 if (!global.slab_luts)
2451 i915_global_register(&global.base);