2 * Copyright © 2011-2012 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Ben Widawsky <ben@bwidawsk.net>
29 * This file implements HW context support. On gen5+ a HW context consists of an
30 * opaque GPU object which is referenced at times of context saves and restores.
31 * With RC6 enabled, the context is also referenced as the GPU enters and
32 * exits RC6 (the GPU has its own internal power context, except on gen5). Though
33 * something like a context does exist for the media ring, the code only
34 * supports contexts for the render ring.
36 * In software, there is a distinction between contexts created by the user,
37 * and the default HW context. The default HW context is used by GPU clients
38 * that do not request setup of their own hardware context. The default
39 * context's state is never restored to help prevent programming errors. This
40 * would happen if a client ran and piggy-backed off another client's GPU state.
41 * The default context only exists to give the GPU some offset to load as the
42 * current context, in order to trigger a save of the context we actually care
43 * about. In fact, the code could likely be constructed, albeit in a more
44 * complicated fashion, to never use the default context, though that limits
45 * the driver's ability to swap out and/or destroy other contexts.
47 * All other contexts are created at the request of a GPU client. These contexts
48 * store GPU state, and thus allow GPU clients to avoid re-emitting state (and
49 * potentially to query certain state) at any time. The kernel driver makes
50 * certain that the appropriate commands are inserted.
52 * The context life cycle is semi-complicated in that context BOs may live
53 * longer than the context itself because of the way the hardware and object
54 * tracking work. Below is a very crude representation of the state machine
55 * describing the context life.
56 *                                           refcount     pincount     active
57 * S0: initial state                            0            0           0
58 * S1: context created                          1            0           0
59 * S2: context is currently running             2            1           X
60 * S3: GPU referenced, but not current          2            0           1
61 * S4: context is current, but destroyed        1            1           0
62 * S5: like S3, but destroyed                   1            0           1
64 * The most common (but not all) transitions:
65 * S0->S1: client creates a context
66 * S1->S2: client submits execbuf with context
67 * S2->S3: another client submits an execbuf with a different context
68 * S3->S1: context object was retired
69 * S3->S2: the client submits another execbuf with the context
70 * S2->S4: context destroy called with current context
71 * S3->S5->S0: destroy path
72 * S4->S5->S0: destroy path on current context
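 *
 * As a worked example of the table above (a sketch of the common path, not an
 * exhaustive trace): a client creates a context (S1: 1/0/0), submits an
 * execbuf with it (S2: 2/1/X), is displaced when another context becomes
 * current (S3: 2/0/1), and returns to S1 once the context object is retired.
 * If the context is instead destroyed while still active, it passes through
 * S5 (1/0/1) and only reaches S0 when the GPU reference is finally dropped.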
74 * There are two confusing terms used above:
75 * The "current context" means the context which is currently running on the
76 * GPU. The GPU has loaded its state already and has stored away the gtt
77 * offset of the BO. The GPU is not actively referencing the data at this
78 * offset, but it will on the next context switch. The only way to avoid this
79 * is to do a GPU reset.
81 * An "active context' is one which was previously the "current context" and is
82 * on the active list waiting for the next context switch to occur. Until this
83 * happens, the object must remain at the same gtt offset. It is therefore
84 * possible to destroy a context, but it is still active.
88 #include <linux/log2.h>
90 #include <drm/i915_drm.h>
92 #include "gt/intel_lrc_reg.h"
95 #include "i915_globals.h"
96 #include "i915_trace.h"
97 #include "i915_user_extensions.h"
99 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
101 static struct i915_global_gem_context {
102 struct i915_global base;
103 struct kmem_cache *slab_luts;
106 struct i915_lut_handle *i915_lut_handle_alloc(void)
108 return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
111 void i915_lut_handle_free(struct i915_lut_handle *lut)
113 kmem_cache_free(global.slab_luts, lut);
116 static void lut_close(struct i915_gem_context *ctx)
118 struct i915_lut_handle *lut, *ln;
119 struct radix_tree_iter iter;
122 list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
123 list_del(&lut->obj_link);
124 i915_lut_handle_free(lut);
126 INIT_LIST_HEAD(&ctx->handles_list);
129 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
130 struct i915_vma *vma = rcu_dereference_raw(*slot);
132 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
135 __i915_gem_object_release_unless_active(vma->obj);
140 static struct intel_context *
141 lookup_user_engine(struct i915_gem_context *ctx,
143 const struct i915_engine_class_instance *ci)
144 #define LOOKUP_USER_INDEX BIT(0)
148 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
149 return ERR_PTR(-EINVAL);
151 if (!i915_gem_context_user_engines(ctx)) {
152 struct intel_engine_cs *engine;
154 engine = intel_engine_lookup_user(ctx->i915,
156 ci->engine_instance);
158 return ERR_PTR(-EINVAL);
162 idx = ci->engine_instance;
165 return i915_gem_context_get_engine(ctx, idx);
168 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
172 lockdep_assert_held(&i915->contexts.mutex);
174 if (INTEL_GEN(i915) >= 11)
175 max = GEN11_MAX_CONTEXT_HW_ID;
176 else if (USES_GUC_SUBMISSION(i915))
178 * When using GuC in proxy submission, GuC consumes the
179 * highest bit in the context id to indicate proxy submission.
181 max = MAX_GUC_CONTEXT_HW_ID;
183 max = MAX_CONTEXT_HW_ID;
185 return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
188 static int steal_hw_id(struct drm_i915_private *i915)
190 struct i915_gem_context *ctx, *cn;
194 lockdep_assert_held(&i915->contexts.mutex);
196 list_for_each_entry_safe(ctx, cn,
197 &i915->contexts.hw_id_list, hw_id_link) {
198 if (atomic_read(&ctx->hw_id_pin_count)) {
199 list_move_tail(&ctx->hw_id_link, &pinned);
203 GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
204 list_del_init(&ctx->hw_id_link);
210 * Remember how far we got up on the last repossession scan, so the
211 * list is kept in a "least recently scanned" order.
213 list_splice_tail(&pinned, &i915->contexts.hw_id_list);
217 static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
221 lockdep_assert_held(&i915->contexts.mutex);
224 * We prefer to steal/stall ourselves and our users over stalling the
225 * entire system. That may be a little unfair to our users, and may
226 * even hurt high priority clients. The choice is whether to oomkill
227 * something else, or steal a context id.
229 ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
230 if (unlikely(ret < 0)) {
231 ret = steal_hw_id(i915);
232 if (ret < 0) /* once again for the correct errno code */
233 ret = new_hw_id(i915, GFP_KERNEL);
242 static void release_hw_id(struct i915_gem_context *ctx)
244 struct drm_i915_private *i915 = ctx->i915;
246 if (list_empty(&ctx->hw_id_link))
249 mutex_lock(&i915->contexts.mutex);
250 if (!list_empty(&ctx->hw_id_link)) {
251 ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
252 list_del_init(&ctx->hw_id_link);
254 mutex_unlock(&i915->contexts.mutex);
257 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
260 if (!e->engines[count])
263 intel_context_put(e->engines[count]);
268 static void free_engines(struct i915_gem_engines *e)
270 __free_engines(e, e->num_engines);
273 static void free_engines_rcu(struct work_struct *wrk)
275 struct i915_gem_engines *e =
276 container_of(wrk, struct i915_gem_engines, rcu.work);
277 struct drm_i915_private *i915 = e->i915;
279 mutex_lock(&i915->drm.struct_mutex);
281 mutex_unlock(&i915->drm.struct_mutex);
284 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
286 struct intel_engine_cs *engine;
287 struct i915_gem_engines *e;
288 enum intel_engine_id id;
290 e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
292 return ERR_PTR(-ENOMEM);
295 for_each_engine(engine, ctx->i915, id) {
296 struct intel_context *ce;
298 ce = intel_context_create(ctx, engine);
300 __free_engines(e, id);
311 static void i915_gem_context_free(struct i915_gem_context *ctx)
313 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
314 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
317 i915_ppgtt_put(ctx->ppgtt);
319 free_engines(rcu_access_pointer(ctx->engines));
320 mutex_destroy(&ctx->engines_mutex);
323 i915_timeline_put(ctx->timeline);
328 list_del(&ctx->link);
329 mutex_destroy(&ctx->mutex);
334 static void contexts_free(struct drm_i915_private *i915)
336 struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
337 struct i915_gem_context *ctx, *cn;
339 lockdep_assert_held(&i915->drm.struct_mutex);
341 llist_for_each_entry_safe(ctx, cn, freed, free_link)
342 i915_gem_context_free(ctx);
345 static void contexts_free_first(struct drm_i915_private *i915)
347 struct i915_gem_context *ctx;
348 struct llist_node *freed;
350 lockdep_assert_held(&i915->drm.struct_mutex);
352 freed = llist_del_first(&i915->contexts.free_list);
356 ctx = container_of(freed, typeof(*ctx), free_link);
357 i915_gem_context_free(ctx);
360 static void contexts_free_worker(struct work_struct *work)
362 struct drm_i915_private *i915 =
363 container_of(work, typeof(*i915), contexts.free_work);
365 mutex_lock(&i915->drm.struct_mutex);
367 mutex_unlock(&i915->drm.struct_mutex);
370 void i915_gem_context_release(struct kref *ref)
372 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
373 struct drm_i915_private *i915 = ctx->i915;
375 trace_i915_context_free(ctx);
376 if (llist_add(&ctx->free_link, &i915->contexts.free_list))
377 queue_work(i915->wq, &i915->contexts.free_work);
380 static void context_close(struct i915_gem_context *ctx)
382 i915_gem_context_set_closed(ctx);
385 * This context will never again be assigned to HW, so we can
386 * reuse its ID for the next context.
391 * The LUT uses the VMA as a backpointer to unref the object,
392 * so we need to clear the LUT before we close all the VMA (inside
397 ctx->file_priv = ERR_PTR(-EBADF);
398 i915_gem_context_put(ctx);
401 static u32 default_desc_template(const struct drm_i915_private *i915,
402 const struct i915_hw_ppgtt *ppgtt)
407 desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
409 address_mode = INTEL_LEGACY_32B_CONTEXT;
410 if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
411 address_mode = INTEL_LEGACY_64B_CONTEXT;
412 desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
415 desc |= GEN8_CTX_L3LLC_COHERENT;
417 /* TODO: WaDisableLiteRestore when we start using semaphore
418 * signalling between Command Streamers
419 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
425 static struct i915_gem_context *
426 __create_context(struct drm_i915_private *dev_priv)
428 struct i915_gem_context *ctx;
429 struct i915_gem_engines *e;
433 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
435 return ERR_PTR(-ENOMEM);
437 kref_init(&ctx->ref);
438 list_add_tail(&ctx->link, &dev_priv->contexts.list);
439 ctx->i915 = dev_priv;
440 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
441 mutex_init(&ctx->mutex);
443 mutex_init(&ctx->engines_mutex);
444 e = default_engines(ctx);
449 RCU_INIT_POINTER(ctx->engines, e);
451 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
452 INIT_LIST_HEAD(&ctx->handles_list);
453 INIT_LIST_HEAD(&ctx->hw_id_link);
455 /* NB: Mark all slices as needing a remap so that when the context first
456 * loads it will restore whatever remap state already exists. If there
457 * is no remap info, it will be a NOP. */
458 ctx->remap_slice = ALL_L3_SLICES(dev_priv);
460 i915_gem_context_set_bannable(ctx);
461 i915_gem_context_set_recoverable(ctx);
463 ctx->ring_size = 4 * PAGE_SIZE;
465 default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
467 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
468 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
477 static struct i915_hw_ppgtt *
478 __set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
480 struct i915_hw_ppgtt *old = ctx->ppgtt;
482 ctx->ppgtt = i915_ppgtt_get(ppgtt);
483 ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
488 static void __assign_ppgtt(struct i915_gem_context *ctx,
489 struct i915_hw_ppgtt *ppgtt)
491 if (ppgtt == ctx->ppgtt)
494 ppgtt = __set_ppgtt(ctx, ppgtt);
496 i915_ppgtt_put(ppgtt);
499 static struct i915_gem_context *
500 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
502 struct i915_gem_context *ctx;
504 lockdep_assert_held(&dev_priv->drm.struct_mutex);
506 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
507 !HAS_EXECLISTS(dev_priv))
508 return ERR_PTR(-EINVAL);
510 /* Reap the most stale context */
511 contexts_free_first(dev_priv);
513 ctx = __create_context(dev_priv);
517 if (HAS_FULL_PPGTT(dev_priv)) {
518 struct i915_hw_ppgtt *ppgtt;
520 ppgtt = i915_ppgtt_create(dev_priv);
522 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
525 return ERR_CAST(ppgtt);
528 __assign_ppgtt(ctx, ppgtt);
529 i915_ppgtt_put(ppgtt);
532 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
533 struct i915_timeline *timeline;
535 timeline = i915_timeline_create(dev_priv, NULL);
536 if (IS_ERR(timeline)) {
538 return ERR_CAST(timeline);
541 ctx->timeline = timeline;
544 trace_i915_context_create(ctx);
550 * i915_gem_context_create_gvt - create a GVT GEM context
553 * This function is used to create a GVT specific GEM context.
556 * pointer to i915_gem_context on success, error pointer if failed
559 struct i915_gem_context *
560 i915_gem_context_create_gvt(struct drm_device *dev)
562 struct i915_gem_context *ctx;
565 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
566 return ERR_PTR(-ENODEV);
568 ret = i915_mutex_lock_interruptible(dev);
572 ctx = i915_gem_create_context(to_i915(dev), 0);
576 ret = i915_gem_context_pin_hw_id(ctx);
583 ctx->file_priv = ERR_PTR(-EBADF);
584 i915_gem_context_set_closed(ctx); /* not user accessible */
585 i915_gem_context_clear_bannable(ctx);
586 i915_gem_context_set_force_single_submission(ctx);
587 if (!USES_GUC_SUBMISSION(to_i915(dev)))
588 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
590 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
592 mutex_unlock(&dev->struct_mutex);
597 destroy_kernel_context(struct i915_gem_context **ctxp)
599 struct i915_gem_context *ctx;
601 /* Keep the context ref so that we can free it immediately ourselves */
602 ctx = i915_gem_context_get(fetch_and_zero(ctxp));
603 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
606 i915_gem_context_free(ctx);
609 struct i915_gem_context *
610 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
612 struct i915_gem_context *ctx;
615 ctx = i915_gem_create_context(i915, 0);
619 err = i915_gem_context_pin_hw_id(ctx);
621 destroy_kernel_context(&ctx);
625 i915_gem_context_clear_bannable(ctx);
626 ctx->sched.priority = I915_USER_PRIORITY(prio);
627 ctx->ring_size = PAGE_SIZE;
629 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
634 static void init_contexts(struct drm_i915_private *i915)
636 mutex_init(&i915->contexts.mutex);
637 INIT_LIST_HEAD(&i915->contexts.list);
639 /* Using the simple ida interface, the max is limited by sizeof(int) */
640 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
641 BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
642 ida_init(&i915->contexts.hw_ida);
643 INIT_LIST_HEAD(&i915->contexts.hw_id_list);
645 INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
646 init_llist_head(&i915->contexts.free_list);
649 static bool needs_preempt_context(struct drm_i915_private *i915)
651 return HAS_EXECLISTS(i915);
654 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
656 struct i915_gem_context *ctx;
658 /* Reassure ourselves we are only called once */
659 GEM_BUG_ON(dev_priv->kernel_context);
660 GEM_BUG_ON(dev_priv->preempt_context);
662 intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
663 init_contexts(dev_priv);
665 /* lowest priority; idle task */
666 ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
668 DRM_ERROR("Failed to create default global context\n");
672 * For easy recognisability, we want the kernel context to be 0 and then
673 * all user contexts will have non-zero hw_id. Kernel contexts are
674 * permanently pinned, so that we never suffer a stall and can
675 * use them from any allocation context (e.g. for evicting other
676 * contexts and from inside the shrinker).
678 GEM_BUG_ON(ctx->hw_id);
679 GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
680 dev_priv->kernel_context = ctx;
682 /* highest priority; preempting task */
683 if (needs_preempt_context(dev_priv)) {
684 ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
686 dev_priv->preempt_context = ctx;
688 DRM_ERROR("Failed to create preempt context; disabling preemption\n");
691 DRM_DEBUG_DRIVER("%s context support initialized\n",
692 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
697 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
699 struct intel_engine_cs *engine;
700 enum intel_engine_id id;
702 lockdep_assert_held(&dev_priv->drm.struct_mutex);
704 for_each_engine(engine, dev_priv, id)
705 intel_engine_lost_context(engine);
708 void i915_gem_contexts_fini(struct drm_i915_private *i915)
710 lockdep_assert_held(&i915->drm.struct_mutex);
712 if (i915->preempt_context)
713 destroy_kernel_context(&i915->preempt_context);
714 destroy_kernel_context(&i915->kernel_context);
716 /* Must free all deferred contexts (via flush_workqueue) first */
717 GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
718 ida_destroy(&i915->contexts.hw_ida);
721 static int context_idr_cleanup(int id, void *p, void *data)
727 static int vm_idr_cleanup(int id, void *p, void *data)
733 static int gem_context_register(struct i915_gem_context *ctx,
734 struct drm_i915_file_private *fpriv)
738 ctx->file_priv = fpriv;
740 ctx->ppgtt->vm.file = fpriv;
742 ctx->pid = get_task_pid(current, PIDTYPE_PID);
743 ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
744 current->comm, pid_nr(ctx->pid));
750 /* And finally expose ourselves to userspace via the idr */
751 mutex_lock(&fpriv->context_idr_lock);
752 ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
753 mutex_unlock(&fpriv->context_idr_lock);
757 kfree(fetch_and_zero(&ctx->name));
759 put_pid(fetch_and_zero(&ctx->pid));
764 int i915_gem_context_open(struct drm_i915_private *i915,
765 struct drm_file *file)
767 struct drm_i915_file_private *file_priv = file->driver_priv;
768 struct i915_gem_context *ctx;
771 mutex_init(&file_priv->context_idr_lock);
772 mutex_init(&file_priv->vm_idr_lock);
774 idr_init(&file_priv->context_idr);
775 idr_init_base(&file_priv->vm_idr, 1);
777 mutex_lock(&i915->drm.struct_mutex);
778 ctx = i915_gem_create_context(i915, 0);
779 mutex_unlock(&i915->drm.struct_mutex);
785 err = gem_context_register(ctx, file_priv);
789 GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
795 mutex_lock(&i915->drm.struct_mutex);
797 mutex_unlock(&i915->drm.struct_mutex);
799 idr_destroy(&file_priv->vm_idr);
800 idr_destroy(&file_priv->context_idr);
801 mutex_destroy(&file_priv->vm_idr_lock);
802 mutex_destroy(&file_priv->context_idr_lock);
806 void i915_gem_context_close(struct drm_file *file)
808 struct drm_i915_file_private *file_priv = file->driver_priv;
810 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
812 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
813 idr_destroy(&file_priv->context_idr);
814 mutex_destroy(&file_priv->context_idr_lock);
816 idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
817 idr_destroy(&file_priv->vm_idr);
818 mutex_destroy(&file_priv->vm_idr_lock);
821 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
822 struct drm_file *file)
824 struct drm_i915_private *i915 = to_i915(dev);
825 struct drm_i915_gem_vm_control *args = data;
826 struct drm_i915_file_private *file_priv = file->driver_priv;
827 struct i915_hw_ppgtt *ppgtt;
830 if (!HAS_FULL_PPGTT(i915))
836 ppgtt = i915_ppgtt_create(i915);
838 return PTR_ERR(ppgtt);
840 ppgtt->vm.file = file_priv;
842 if (args->extensions) {
843 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
850 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
854 err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
858 GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
860 mutex_unlock(&file_priv->vm_idr_lock);
866 mutex_unlock(&file_priv->vm_idr_lock);
868 i915_ppgtt_put(ppgtt);
872 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
873 struct drm_file *file)
875 struct drm_i915_file_private *file_priv = file->driver_priv;
876 struct drm_i915_gem_vm_control *args = data;
877 struct i915_hw_ppgtt *ppgtt;
884 if (args->extensions)
891 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
895 ppgtt = idr_remove(&file_priv->vm_idr, id);
897 mutex_unlock(&file_priv->vm_idr_lock);
901 i915_ppgtt_put(ppgtt);
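/*
 * Illustrative userspace flow for the two VM ioctls above; a minimal sketch
 * assuming the uAPI in include/uapi/drm/i915_drm.h and libdrm's drmIoctl()
 * (fd is an open render node, error handling elided):
 *
 *	struct drm_i915_gem_vm_control ctl = { .flags = 0, .extensions = 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl);
 *	...ctl.vm_id now names a full ppGTT private to this file...
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
 */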
905 struct context_barrier_task {
906 struct i915_active base;
907 void (*task)(void *data);
911 static void cb_retire(struct i915_active *base)
913 struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
918 i915_active_fini(&cb->base);
922 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
923 static int context_barrier_task(struct i915_gem_context *ctx,
924 intel_engine_mask_t engines,
925 int (*emit)(struct i915_request *rq, void *data),
926 void (*task)(void *data),
929 struct drm_i915_private *i915 = ctx->i915;
930 struct context_barrier_task *cb;
931 struct i915_gem_engines_iter it;
932 struct intel_context *ce;
935 lockdep_assert_held(&i915->drm.struct_mutex);
938 cb = kmalloc(sizeof(*cb), GFP_KERNEL);
942 i915_active_init(i915, &cb->base, cb_retire);
943 i915_active_acquire(&cb->base);
945 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
946 struct i915_request *rq;
948 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
954 if (!(ce->engine->mask & engines) || !ce->state)
957 rq = intel_context_create_request(ce);
965 err = emit(rq, data);
967 err = i915_active_ref(&cb->base, rq->fence.context, rq);
969 i915_request_add(rq);
973 i915_gem_context_unlock_engines(ctx);
975 cb->task = err ? NULL : task; /* caller needs to unwind instead */
978 i915_active_release(&cb->base);
983 static int get_ppgtt(struct drm_i915_file_private *file_priv,
984 struct i915_gem_context *ctx,
985 struct drm_i915_gem_context_param *args)
987 struct i915_hw_ppgtt *ppgtt;
993 /* XXX rcu acquire? */
994 ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
998 ppgtt = i915_ppgtt_get(ctx->ppgtt);
999 mutex_unlock(&ctx->i915->drm.struct_mutex);
1001 ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1005 ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
1010 i915_ppgtt_get(ppgtt);
1017 mutex_unlock(&file_priv->vm_idr_lock);
1019 i915_ppgtt_put(ppgtt);
1023 static void set_ppgtt_barrier(void *data)
1025 struct i915_hw_ppgtt *old = data;
1027 if (INTEL_GEN(old->vm.i915) < 8)
1028 gen6_ppgtt_unpin_all(old);
1030 i915_ppgtt_put(old);
1033 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1035 struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
1036 struct intel_engine_cs *engine = rq->engine;
1037 u32 base = engine->mmio_base;
1041 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1042 const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
1044 cs = intel_ring_begin(rq, 6);
1048 *cs++ = MI_LOAD_REGISTER_IMM(2);
1050 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1051 *cs++ = upper_32_bits(pd_daddr);
1052 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1053 *cs++ = lower_32_bits(pd_daddr);
1056 intel_ring_advance(rq, cs);
1057 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1058 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1062 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1063 for (i = GEN8_3LVL_PDPES; i--; ) {
1064 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1066 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1067 *cs++ = upper_32_bits(pd_daddr);
1068 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1069 *cs++ = lower_32_bits(pd_daddr);
1072 intel_ring_advance(rq, cs);
1074 /* ppGTT is not part of the legacy context image */
1075 gen6_ppgtt_pin(ppgtt);
1081 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1082 struct i915_gem_context *ctx,
1083 struct drm_i915_gem_context_param *args)
1085 struct i915_hw_ppgtt *ppgtt, *old;
1094 if (upper_32_bits(args->value))
1097 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1101 ppgtt = idr_find(&file_priv->vm_idr, args->value);
1103 i915_ppgtt_get(ppgtt);
1104 mutex_unlock(&file_priv->vm_idr_lock);
1108 err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1112 if (ppgtt == ctx->ppgtt)
1115 /* Tear down the existing obj:vma cache; it will have to be rebuilt. */
1118 old = __set_ppgtt(ctx, ppgtt);
1121 * We need to flush any requests using the current ppgtt before
1122 * we release it as the requests do not hold a reference themselves,
1123 * only indirectly through the context.
1125 err = context_barrier_task(ctx, ALL_ENGINES,
1131 ctx->desc_template = default_desc_template(ctx->i915, old);
1132 i915_ppgtt_put(ppgtt);
1136 mutex_unlock(&ctx->i915->drm.struct_mutex);
1139 i915_ppgtt_put(ppgtt);
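/*
 * Illustrative userspace flow for sharing one ppGTT between two contexts via
 * I915_CONTEXT_PARAM_VM; a sketch assuming the uAPI in i915_drm.h (ctx_a and
 * ctx_b are existing context ids, error handling elided):
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_a,
 *		.param = I915_CONTEXT_PARAM_VM,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	...get_ppgtt() exported ctx_a's ppGTT as the vm id in p.value...
 *
 *	p.ctx_id = ctx_b;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *	...set_ppgtt() now points ctx_b at the same address space...
 */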
1143 static int gen8_emit_rpcs_config(struct i915_request *rq,
1144 struct intel_context *ce,
1145 struct intel_sseu sseu)
1150 cs = intel_ring_begin(rq, 4);
1154 offset = i915_ggtt_offset(ce->state) +
1155 LRC_STATE_PN * PAGE_SIZE +
1156 (CTX_R_PWR_CLK_STATE + 1) * 4;
1158 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1159 *cs++ = lower_32_bits(offset);
1160 *cs++ = upper_32_bits(offset);
1161 *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1163 intel_ring_advance(rq, cs);
1169 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1171 struct i915_request *rq;
1174 lockdep_assert_held(&ce->pin_mutex);
1177 * If the context is not idle, we have to submit an ordered request to
1178 * modify its context image via the kernel context (writing to our own
1179 * image, or into the registers directly, does not stick). Pristine
1180 * and idle contexts will be configured on pinning.
1182 if (!intel_context_is_pinned(ce))
1185 rq = i915_request_create(ce->engine->kernel_context);
1189 /* Queue this switch after all other activity by this context. */
1190 ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
1194 ret = gen8_emit_rpcs_config(rq, ce, sseu);
1199 * Guarantee that the context image and the timeline remain pinned until
1200 * the modifying request is retired, by setting the ce activity tracker.
1202 * But we only need to take one pin on account of it; in other words,
1203 * transfer the pinned ce object to the tracked active request.
1205 if (!i915_active_request_isset(&ce->active_tracker))
1206 __intel_context_pin(ce);
1207 __i915_active_request_set(&ce->active_tracker, rq);
1210 i915_request_add(rq);
1215 __intel_context_reconfigure_sseu(struct intel_context *ce,
1216 struct intel_sseu sseu)
1220 GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
1221 GEM_BUG_ON(ce->engine->id != RCS0);
1223 ret = intel_context_lock_pinned(ce);
1227 /* Nothing to do if unmodified. */
1228 if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1231 ret = gen8_modify_rpcs(ce, sseu);
1236 intel_context_unlock_pinned(ce);
1241 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1243 struct drm_i915_private *i915 = ce->gem_context->i915;
1246 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1250 ret = __intel_context_reconfigure_sseu(ce, sseu);
1252 mutex_unlock(&i915->drm.struct_mutex);
1258 user_to_context_sseu(struct drm_i915_private *i915,
1259 const struct drm_i915_gem_context_param_sseu *user,
1260 struct intel_sseu *context)
1262 const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1264 /* No zeros in any field. */
1265 if (!user->slice_mask || !user->subslice_mask ||
1266 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1270 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1274 * Some future proofing on the types since the uAPI is wider than the
1275 * current internal implementation.
1277 if (overflows_type(user->slice_mask, context->slice_mask) ||
1278 overflows_type(user->subslice_mask, context->subslice_mask) ||
1279 overflows_type(user->min_eus_per_subslice,
1280 context->min_eus_per_subslice) ||
1281 overflows_type(user->max_eus_per_subslice,
1282 context->max_eus_per_subslice))
1285 /* Check validity against hardware. */
1286 if (user->slice_mask & ~device->slice_mask)
1289 if (user->subslice_mask & ~device->subslice_mask[0])
1292 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1295 context->slice_mask = user->slice_mask;
1296 context->subslice_mask = user->subslice_mask;
1297 context->min_eus_per_subslice = user->min_eus_per_subslice;
1298 context->max_eus_per_subslice = user->max_eus_per_subslice;
1300 /* Part specific restrictions. */
1301 if (IS_GEN(i915, 11)) {
1302 unsigned int hw_s = hweight8(device->slice_mask);
1303 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1304 unsigned int req_s = hweight8(context->slice_mask);
1305 unsigned int req_ss = hweight8(context->subslice_mask);
1308 * Only full subslice enablement is possible if more than one
1309 * slice is turned on.
1311 if (req_s > 1 && req_ss != hw_ss_per_s)
1315 * If more than four (SScount bitfield limit) subslices are
1316 * requested then the number has to be even.
1318 if (req_ss > 4 && (req_ss & 1))
1322 * If only one slice is enabled and the subslice count is below the
1323 * device's full enablement, it must be at most half of all the
1324 * available subslices.
1326 if (req_s == 1 && req_ss < hw_ss_per_s &&
1327 req_ss > (hw_ss_per_s / 2))
1330 /* ABI restriction - VME use case only. */
1332 /* All slices or one slice only. */
1333 if (req_s != 1 && req_s != hw_s)
1337 * Half subslices or full enablement only when one slice is
1341 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1344 /* No EU configuration changes. */
1345 if ((user->min_eus_per_subslice !=
1346 device->max_eus_per_subslice) ||
1347 (user->max_eus_per_subslice !=
1348 device->max_eus_per_subslice))
1355 static int set_sseu(struct i915_gem_context *ctx,
1356 struct drm_i915_gem_context_param *args)
1358 struct drm_i915_private *i915 = ctx->i915;
1359 struct drm_i915_gem_context_param_sseu user_sseu;
1360 struct intel_context *ce;
1361 struct intel_sseu sseu;
1362 unsigned long lookup;
1365 if (args->size < sizeof(user_sseu))
1368 if (!IS_GEN(i915, 11))
1371 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1378 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1382 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1383 lookup |= LOOKUP_USER_INDEX;
1385 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1389 /* Only the render engine supports RPCS configuration. */
1390 if (ce->engine->class != RENDER_CLASS) {
1395 ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1399 ret = intel_context_reconfigure_sseu(ce, sseu);
1403 args->size = sizeof(user_sseu);
1406 intel_context_put(ce);
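/*
 * Illustrative userspace request matching the Gen11 VME restrictions checked
 * in user_to_context_sseu(); a sketch assuming the uAPI in i915_drm.h and an
 * ICL part with one slice of eight subslices (error handling elided):
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xf,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * i.e. one slice, half of the subslices, and no change to the EU counts, as
 * required by the "VME use case only" rules above.
 */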
1410 struct set_engines {
1411 struct i915_gem_context *ctx;
1412 struct i915_gem_engines *engines;
1415 static const i915_user_extension_fn set_engines__extensions[] = {
1419 set_engines(struct i915_gem_context *ctx,
1420 const struct drm_i915_gem_context_param *args)
1422 struct i915_context_param_engines __user *user =
1423 u64_to_user_ptr(args->value);
1424 struct set_engines set = { .ctx = ctx };
1425 unsigned int num_engines, n;
1429 if (!args->size) { /* switch back to legacy user_ring_map */
1430 if (!i915_gem_context_user_engines(ctx))
1433 set.engines = default_engines(ctx);
1434 if (IS_ERR(set.engines))
1435 return PTR_ERR(set.engines);
1440 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1441 if (args->size < sizeof(*user) ||
1442 !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1443 DRM_DEBUG("Invalid size for engine array: %d\n",
1449 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1450 * first 64 engines defined here.
1452 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1454 set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1459 set.engines->i915 = ctx->i915;
1460 for (n = 0; n < num_engines; n++) {
1461 struct i915_engine_class_instance ci;
1462 struct intel_engine_cs *engine;
1464 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1465 __free_engines(set.engines, n);
1469 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1470 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1471 set.engines->engines[n] = NULL;
1475 engine = intel_engine_lookup_user(ctx->i915,
1477 ci.engine_instance);
1479 DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1480 n, ci.engine_class, ci.engine_instance);
1481 __free_engines(set.engines, n);
1485 set.engines->engines[n] = intel_context_create(ctx, engine);
1486 if (!set.engines->engines[n]) {
1487 __free_engines(set.engines, n);
1491 set.engines->num_engines = num_engines;
1494 if (!get_user(extensions, &user->extensions))
1495 err = i915_user_extensions(u64_to_user_ptr(extensions),
1496 set_engines__extensions,
1497 ARRAY_SIZE(set_engines__extensions),
1500 free_engines(set.engines);
1505 mutex_lock(&ctx->engines_mutex);
1507 i915_gem_context_set_user_engines(ctx);
1509 i915_gem_context_clear_user_engines(ctx);
1510 rcu_swap_protected(ctx->engines, set.engines, 1);
1511 mutex_unlock(&ctx->engines_mutex);
1513 INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu);
1514 queue_rcu_work(system_wq, &set.engines->rcu);
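/*
 * Illustrative construction of an engine map from userspace; a sketch
 * assuming the I915_DEFINE_CONTEXT_PARAM_ENGINES() helper from i915_drm.h
 * (two render slots plus one video engine, error handling elided):
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 3) = {
 *		.engines = {
 *			{ I915_ENGINE_CLASS_RENDER, 0 },
 *			{ I915_ENGINE_CLASS_RENDER, 0 },
 *			{ I915_ENGINE_CLASS_VIDEO, 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * Execbufs on this context then select an engine by index into this array
 * (subject to the I915_EXEC_RING_MASK note above); resubmitting the param
 * with size == 0 reverts to the legacy ring map.
 */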
1519 static struct i915_gem_engines *
1520 __copy_engines(struct i915_gem_engines *e)
1522 struct i915_gem_engines *copy;
1525 copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1527 return ERR_PTR(-ENOMEM);
1529 copy->i915 = e->i915;
1530 for (n = 0; n < e->num_engines; n++) {
1532 copy->engines[n] = intel_context_get(e->engines[n]);
1534 copy->engines[n] = NULL;
1536 copy->num_engines = n;
1542 get_engines(struct i915_gem_context *ctx,
1543 struct drm_i915_gem_context_param *args)
1545 struct i915_context_param_engines __user *user;
1546 struct i915_gem_engines *e;
1547 size_t n, count, size;
1550 err = mutex_lock_interruptible(&ctx->engines_mutex);
1555 if (i915_gem_context_user_engines(ctx))
1556 e = __copy_engines(i915_gem_context_engines(ctx));
1557 mutex_unlock(&ctx->engines_mutex);
1558 if (IS_ERR_OR_NULL(e)) {
1560 return PTR_ERR_OR_ZERO(e);
1563 count = e->num_engines;
1565 /* Be paranoid in case we have an impedance mismatch */
1566 if (!check_struct_size(user, engines, count, &size)) {
1570 if (overflows_type(size, args->size)) {
1580 if (args->size < size) {
1585 user = u64_to_user_ptr(args->value);
1586 if (!access_ok(user, size)) {
1591 if (put_user(0, &user->extensions)) {
1596 for (n = 0; n < count; n++) {
1597 struct i915_engine_class_instance ci = {
1598 .engine_class = I915_ENGINE_CLASS_INVALID,
1599 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1602 if (e->engines[n]) {
1603 ci.engine_class = e->engines[n]->engine->uabi_class;
1604 ci.engine_instance = e->engines[n]->engine->instance;
1607 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1616 INIT_RCU_WORK(&e->rcu, free_engines_rcu);
1617 queue_rcu_work(system_wq, &e->rcu);
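/*
 * Illustrative read-back of a user-supplied engine map; a sketch of the
 * usual probe-then-read pattern for variable-length getparams (assumes the
 * size-probe behaviour of get_engines() above, error handling elided):
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = 0,
 *	};
 *	struct i915_context_param_engines *map;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	map = malloc(p.size);
 *	p.value = (uintptr_t)map;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 */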
1621 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1622 struct i915_gem_context *ctx,
1623 struct drm_i915_gem_context_param *args)
1627 switch (args->param) {
1628 case I915_CONTEXT_PARAM_NO_ZEROMAP:
1631 else if (args->value)
1632 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1634 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1637 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1640 else if (args->value)
1641 i915_gem_context_set_no_error_capture(ctx);
1643 i915_gem_context_clear_no_error_capture(ctx);
1646 case I915_CONTEXT_PARAM_BANNABLE:
1649 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1651 else if (args->value)
1652 i915_gem_context_set_bannable(ctx);
1654 i915_gem_context_clear_bannable(ctx);
1657 case I915_CONTEXT_PARAM_RECOVERABLE:
1660 else if (args->value)
1661 i915_gem_context_set_recoverable(ctx);
1663 i915_gem_context_clear_recoverable(ctx);
1666 case I915_CONTEXT_PARAM_PRIORITY:
1668 s64 priority = args->value;
1672 else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1674 else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1675 priority < I915_CONTEXT_MIN_USER_PRIORITY)
1677 else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1678 !capable(CAP_SYS_NICE))
1681 ctx->sched.priority =
1682 I915_USER_PRIORITY(priority);
1686 case I915_CONTEXT_PARAM_SSEU:
1687 ret = set_sseu(ctx, args);
1690 case I915_CONTEXT_PARAM_VM:
1691 ret = set_ppgtt(fpriv, ctx, args);
1694 case I915_CONTEXT_PARAM_ENGINES:
1695 ret = set_engines(ctx, args);
1698 case I915_CONTEXT_PARAM_BAN_PERIOD:
1708 struct i915_gem_context *ctx;
1709 struct drm_i915_file_private *fpriv;
1712 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1714 struct drm_i915_gem_context_create_ext_setparam local;
1715 const struct create_ext *arg = data;
1717 if (copy_from_user(&local, ext, sizeof(local)))
1720 if (local.param.ctx_id)
1723 return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1726 static int clone_engines(struct i915_gem_context *dst,
1727 struct i915_gem_context *src)
1729 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1730 struct i915_gem_engines *clone;
1734 clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1738 clone->i915 = dst->i915;
1739 for (n = 0; n < e->num_engines; n++) {
1740 if (!e->engines[n]) {
1741 clone->engines[n] = NULL;
1746 intel_context_create(dst, e->engines[n]->engine);
1747 if (!clone->engines[n]) {
1748 __free_engines(clone, n);
1752 clone->num_engines = n;
1754 user_engines = i915_gem_context_user_engines(src);
1755 i915_gem_context_unlock_engines(src);
1757 free_engines(dst->engines);
1758 RCU_INIT_POINTER(dst->engines, clone);
1760 i915_gem_context_set_user_engines(dst);
1762 i915_gem_context_clear_user_engines(dst);
1766 i915_gem_context_unlock_engines(src);
1770 static int clone_flags(struct i915_gem_context *dst,
1771 struct i915_gem_context *src)
1773 dst->user_flags = src->user_flags;
1777 static int clone_schedattr(struct i915_gem_context *dst,
1778 struct i915_gem_context *src)
1780 dst->sched = src->sched;
1784 static int clone_sseu(struct i915_gem_context *dst,
1785 struct i915_gem_context *src)
1787 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1788 struct i915_gem_engines *clone;
1792 clone = dst->engines; /* no locking required; sole access */
1793 if (e->num_engines != clone->num_engines) {
1798 for (n = 0; n < e->num_engines; n++) {
1799 struct intel_context *ce = e->engines[n];
1801 if (clone->engines[n]->engine->class != ce->engine->class) {
1802 /* Must have compatible engine maps! */
1807 /* serialises with set_sseu */
1808 err = intel_context_lock_pinned(ce);
1812 clone->engines[n]->sseu = ce->sseu;
1813 intel_context_unlock_pinned(ce);
1818 i915_gem_context_unlock_engines(src);
1822 static int clone_timeline(struct i915_gem_context *dst,
1823 struct i915_gem_context *src)
1825 if (src->timeline) {
1826 GEM_BUG_ON(src->timeline == dst->timeline);
1829 i915_timeline_put(dst->timeline);
1830 dst->timeline = i915_timeline_get(src->timeline);
1836 static int clone_vm(struct i915_gem_context *dst,
1837 struct i915_gem_context *src)
1839 struct i915_hw_ppgtt *ppgtt;
1843 ppgtt = READ_ONCE(src->ppgtt);
1847 if (!kref_get_unless_zero(&ppgtt->ref))
1851 * This ppgtt may have been reallocated between
1852 * the read and the kref, and reassigned to a third
1853 * context. In order to avoid inadvertent sharing
1854 * of this ppgtt with that third context (and not
1855 * src), we have to confirm that we have the same
1856 * ppgtt after passing through the strong memory
1857 * barrier implied by a successful
1858 * kref_get_unless_zero().
1860 * Once we have acquired the current ppgtt of src,
1861 * we no longer care if it is released from src, as
1862 * it cannot be reallocated elsewhere.
1865 if (ppgtt == READ_ONCE(src->ppgtt))
1868 i915_ppgtt_put(ppgtt);
1873 __assign_ppgtt(dst, ppgtt);
1874 i915_ppgtt_put(ppgtt);
1880 static int create_clone(struct i915_user_extension __user *ext, void *data)
1882 static int (* const fn[])(struct i915_gem_context *dst,
1883 struct i915_gem_context *src) = {
1884 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
1885 MAP(ENGINES, clone_engines),
1886 MAP(FLAGS, clone_flags),
1887 MAP(SCHEDATTR, clone_schedattr),
1888 MAP(SSEU, clone_sseu),
1889 MAP(TIMELINE, clone_timeline),
1893 struct drm_i915_gem_context_create_ext_clone local;
1894 const struct create_ext *arg = data;
1895 struct i915_gem_context *dst = arg->ctx;
1896 struct i915_gem_context *src;
1899 if (copy_from_user(&local, ext, sizeof(local)))
1902 BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
1903 I915_CONTEXT_CLONE_UNKNOWN);
1905 if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
1912 src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
1917 GEM_BUG_ON(src == dst);
1919 for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
1920 if (!(local.flags & BIT(bit)))
1923 err = fn[bit](dst, src);
1931 static const i915_user_extension_fn create_extensions[] = {
1932 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
1933 [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
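/*
 * Illustrative use of the create extensions from userspace; a sketch that
 * chains a single SETPARAM extension (marking the new context as
 * non-recoverable) onto CONTEXT_CREATE_EXT, assuming the uAPI in i915_drm.h
 * (error handling elided):
 *
 *	struct drm_i915_gem_context_create_ext_setparam setparam = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&setparam,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	...create.ctx_id now names the new context...
 *
 * Further extensions (e.g. I915_CONTEXT_CREATE_EXT_CLONE) would be chained
 * through base.next_extension.
 */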
1936 static bool client_is_banned(struct drm_i915_file_private *file_priv)
1938 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
1941 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1942 struct drm_file *file)
1944 struct drm_i915_private *i915 = to_i915(dev);
1945 struct drm_i915_gem_context_create_ext *args = data;
1946 struct create_ext ext_data;
1949 if (!DRIVER_CAPS(i915)->has_logical_contexts)
1952 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
1955 ret = i915_terminally_wedged(i915);
1959 ext_data.fpriv = file->driver_priv;
1960 if (client_is_banned(ext_data.fpriv)) {
1961 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
1963 pid_nr(get_task_pid(current, PIDTYPE_PID)));
1967 ret = i915_mutex_lock_interruptible(dev);
1971 ext_data.ctx = i915_gem_create_context(i915, args->flags);
1972 mutex_unlock(&dev->struct_mutex);
1973 if (IS_ERR(ext_data.ctx))
1974 return PTR_ERR(ext_data.ctx);
1976 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
1977 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
1979 ARRAY_SIZE(create_extensions),
1985 ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
1990 DRM_DEBUG("HW context %d created\n", args->ctx_id);
1995 mutex_lock(&dev->struct_mutex);
1996 context_close(ext_data.ctx);
1997 mutex_unlock(&dev->struct_mutex);
2001 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2002 struct drm_file *file)
2004 struct drm_i915_gem_context_destroy *args = data;
2005 struct drm_i915_file_private *file_priv = file->driver_priv;
2006 struct i915_gem_context *ctx;
2014 if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2017 ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2018 mutex_unlock(&file_priv->context_idr_lock);
2022 mutex_lock(&dev->struct_mutex);
2024 mutex_unlock(&dev->struct_mutex);
2029 static int get_sseu(struct i915_gem_context *ctx,
2030 struct drm_i915_gem_context_param *args)
2032 struct drm_i915_gem_context_param_sseu user_sseu;
2033 struct intel_context *ce;
2034 unsigned long lookup;
2037 if (args->size == 0)
2039 else if (args->size < sizeof(user_sseu))
2042 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2049 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2053 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2054 lookup |= LOOKUP_USER_INDEX;
2056 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2060 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2062 intel_context_put(ce);
2066 user_sseu.slice_mask = ce->sseu.slice_mask;
2067 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2068 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2069 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2071 intel_context_unlock_pinned(ce);
2072 intel_context_put(ce);
2074 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2079 args->size = sizeof(user_sseu);
2084 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2085 struct drm_file *file)
2087 struct drm_i915_file_private *file_priv = file->driver_priv;
2088 struct drm_i915_gem_context_param *args = data;
2089 struct i915_gem_context *ctx;
2092 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2096 switch (args->param) {
2097 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2099 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2102 case I915_CONTEXT_PARAM_GTT_SIZE:
2105 args->value = ctx->ppgtt->vm.total;
2106 else if (to_i915(dev)->mm.aliasing_ppgtt)
2107 args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
2109 args->value = to_i915(dev)->ggtt.vm.total;
2112 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2114 args->value = i915_gem_context_no_error_capture(ctx);
2117 case I915_CONTEXT_PARAM_BANNABLE:
2119 args->value = i915_gem_context_is_bannable(ctx);
2122 case I915_CONTEXT_PARAM_RECOVERABLE:
2124 args->value = i915_gem_context_is_recoverable(ctx);
2127 case I915_CONTEXT_PARAM_PRIORITY:
2129 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2132 case I915_CONTEXT_PARAM_SSEU:
2133 ret = get_sseu(ctx, args);
2136 case I915_CONTEXT_PARAM_VM:
2137 ret = get_ppgtt(file_priv, ctx, args);
2140 case I915_CONTEXT_PARAM_ENGINES:
2141 ret = get_engines(ctx, args);
2144 case I915_CONTEXT_PARAM_BAN_PERIOD:
2150 i915_gem_context_put(ctx);
2154 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2155 struct drm_file *file)
2157 struct drm_i915_file_private *file_priv = file->driver_priv;
2158 struct drm_i915_gem_context_param *args = data;
2159 struct i915_gem_context *ctx;
2162 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2166 ret = ctx_setparam(file_priv, ctx, args);
2168 i915_gem_context_put(ctx);
2172 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2173 void *data, struct drm_file *file)
2175 struct drm_i915_private *dev_priv = to_i915(dev);
2176 struct drm_i915_reset_stats *args = data;
2177 struct i915_gem_context *ctx;
2180 if (args->flags || args->pad)
2185 ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2190 * We opt for unserialised reads here. This may result in tearing
2191 * in the extremely unlikely event of a GPU hang on this context
2192 * as we are querying them. If we need that extra layer of protection,
2193 * we should wrap the hangstats with a seqlock.
2196 if (capable(CAP_SYS_ADMIN))
2197 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2199 args->reset_count = 0;
2201 args->batch_active = atomic_read(&ctx->guilty_count);
2202 args->batch_pending = atomic_read(&ctx->active_count);
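/*
 * Illustrative query of the per-context hang statistics reported above; a
 * sketch assuming struct drm_i915_reset_stats from i915_drm.h (error
 * handling elided):
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *	...stats.batch_active and stats.batch_pending then report the guilty
 *	and innocent hang counts recorded against this context...
 */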
2210 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2212 struct drm_i915_private *i915 = ctx->i915;
2215 mutex_lock(&i915->contexts.mutex);
2217 GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2219 if (list_empty(&ctx->hw_id_link)) {
2220 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2222 err = assign_hw_id(i915, &ctx->hw_id);
2226 list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2229 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2230 atomic_inc(&ctx->hw_id_pin_count);
2233 mutex_unlock(&i915->contexts.mutex);
2237 /* GEM context-engines iterator: for_each_gem_engine() */
2238 struct intel_context *
2239 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2241 const struct i915_gem_engines *e = it->engines;
2242 struct intel_context *ctx;
2245 if (it->idx >= e->num_engines)
2248 ctx = e->engines[it->idx++];
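/*
 * Typical in-kernel usage of the iterator; a minimal sketch mirroring the
 * loop in context_barrier_task() above (NULL slots in the engine map are
 * skipped by i915_gem_engines_iter_next()):
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		...operate on ce...
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */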
2254 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2255 #include "selftests/mock_context.c"
2256 #include "selftests/i915_gem_context.c"
2259 static void i915_global_gem_context_shrink(void)
2261 kmem_cache_shrink(global.slab_luts);
2264 static void i915_global_gem_context_exit(void)
2266 kmem_cache_destroy(global.slab_luts);
2269 static struct i915_global_gem_context global = { {
2270 .shrink = i915_global_gem_context_shrink,
2271 .exit = i915_global_gem_context_exit,
2274 int __init i915_global_gem_context_init(void)
2276 global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2277 if (!global.slab_luts)
2280 i915_global_register(&global.base);