1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and exits
11  * from RC6 (GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU some offset to load as the
21  * current context and so trigger a save of the context we actually care about. In fact, the
22  * code could likely be constructed, albeit in a more complicated fashion, to
23  * never use the default context, though that limits the driver's ability to
24  * swap out, and/or destroy other contexts.
25  *
26  * All other contexts are created as a request by the GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to not re-emit state (and
28  * potentially query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware and object
33  * tracking work. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: another client submits an execbuf with a different context
47  * S3->S1: context object was retired
48  * S3->S2: client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context" is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  *  possible to destroy a context while it is still active.
64  *
65  */
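
/*
 * Illustrative userspace sketch (not part of the driver) of how the
 * lifecycle above is driven: S0->S1 corresponds to the context-create ioctl,
 * and the destroy ioctl starts the tear-down paths; the returned id is what
 * execbuf later carries in drm_i915_gem_execbuffer2.rsvd1 (S1->S2). The
 * helper names, the origin of fd (an open render node) and the uAPI header
 * include path are assumptions for the example.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static __u32 example_create_context(int fd)
 *	{
 *		struct drm_i915_gem_context_create create = {};
 *
 *		// S0->S1: the kernel allocates a new i915_gem_context with
 *		// refcount 1 and returns a nonzero handle in create.ctx_id
 *		// (id 0 names the per-file default context).
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
 *			return 0;
 *
 *		return create.ctx_id;
 *	}
 *
 *	static void example_destroy_context(int fd, __u32 ctx_id)
 *	{
 *		struct drm_i915_gem_context_destroy destroy = {
 *			.ctx_id = ctx_id,
 *		};
 *
 *		// Starts the destroy path (S2->S4 / S3->S5 above if the
 *		// context is still referenced by the GPU).
 *		ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *	}
 */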
66
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69
70 #include <drm/i915_drm.h>
71
72 #include "gt/intel_engine_user.h"
73 #include "gt/intel_lrc_reg.h"
74
75 #include "i915_gem_context.h"
76 #include "i915_globals.h"
77 #include "i915_trace.h"
78 #include "i915_user_extensions.h"
79
80 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
81
82 static struct i915_global_gem_context {
83         struct i915_global base;
84         struct kmem_cache *slab_luts;
85 } global;
86
87 struct i915_lut_handle *i915_lut_handle_alloc(void)
88 {
89         return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
90 }
91
92 void i915_lut_handle_free(struct i915_lut_handle *lut)
93 {
94         return kmem_cache_free(global.slab_luts, lut);
95 }
96
97 static void lut_close(struct i915_gem_context *ctx)
98 {
99         struct radix_tree_iter iter;
100         void __rcu **slot;
101
102         lockdep_assert_held(&ctx->mutex);
103
104         rcu_read_lock();
105         radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
106                 struct i915_vma *vma = rcu_dereference_raw(*slot);
107                 struct drm_i915_gem_object *obj = vma->obj;
108                 struct i915_lut_handle *lut;
109
110                 if (!kref_get_unless_zero(&obj->base.refcount))
111                         continue;
112
113                 rcu_read_unlock();
114                 i915_gem_object_lock(obj);
115                 list_for_each_entry(lut, &obj->lut_list, obj_link) {
116                         if (lut->ctx != ctx)
117                                 continue;
118
119                         if (lut->handle != iter.index)
120                                 continue;
121
122                         list_del(&lut->obj_link);
123                         break;
124                 }
125                 i915_gem_object_unlock(obj);
126                 rcu_read_lock();
127
128                 if (&lut->obj_link != &obj->lut_list) {
129                         i915_lut_handle_free(lut);
130                         radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
131                         if (atomic_dec_and_test(&vma->open_count) &&
132                             !i915_vma_is_ggtt(vma))
133                                 i915_vma_close(vma);
134                         i915_gem_object_put(obj);
135                 }
136
137                 i915_gem_object_put(obj);
138         }
139         rcu_read_unlock();
140 }
141
142 static struct intel_context *
143 lookup_user_engine(struct i915_gem_context *ctx,
144                    unsigned long flags,
145                    const struct i915_engine_class_instance *ci)
146 #define LOOKUP_USER_INDEX BIT(0)
147 {
148         int idx;
149
150         if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
151                 return ERR_PTR(-EINVAL);
152
153         if (!i915_gem_context_user_engines(ctx)) {
154                 struct intel_engine_cs *engine;
155
156                 engine = intel_engine_lookup_user(ctx->i915,
157                                                   ci->engine_class,
158                                                   ci->engine_instance);
159                 if (!engine)
160                         return ERR_PTR(-EINVAL);
161
162                 idx = engine->legacy_idx;
163         } else {
164                 idx = ci->engine_instance;
165         }
166
167         return i915_gem_context_get_engine(ctx, idx);
168 }
169
170 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
171 {
172         unsigned int max;
173
174         lockdep_assert_held(&i915->contexts.mutex);
175
176         if (INTEL_GEN(i915) >= 11)
177                 max = GEN11_MAX_CONTEXT_HW_ID;
178         else if (USES_GUC_SUBMISSION(i915))
179                 /*
180                  * When using GuC in proxy submission, GuC consumes the
181                  * highest bit in the context id to indicate proxy submission.
182                  */
183                 max = MAX_GUC_CONTEXT_HW_ID;
184         else
185                 max = MAX_CONTEXT_HW_ID;
186
187         return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
188 }
189
190 static int steal_hw_id(struct drm_i915_private *i915)
191 {
192         struct i915_gem_context *ctx, *cn;
193         LIST_HEAD(pinned);
194         int id = -ENOSPC;
195
196         lockdep_assert_held(&i915->contexts.mutex);
197
198         list_for_each_entry_safe(ctx, cn,
199                                  &i915->contexts.hw_id_list, hw_id_link) {
200                 if (atomic_read(&ctx->hw_id_pin_count)) {
201                         list_move_tail(&ctx->hw_id_link, &pinned);
202                         continue;
203                 }
204
205                 GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
206                 list_del_init(&ctx->hw_id_link);
207                 id = ctx->hw_id;
208                 break;
209         }
210
211         /*
212          * Remember how far we got on the last repossession scan, so the
213          * list is kept in a "least recently scanned" order.
214          */
215         list_splice_tail(&pinned, &i915->contexts.hw_id_list);
216         return id;
217 }
218
219 static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
220 {
221         int ret;
222
223         lockdep_assert_held(&i915->contexts.mutex);
224
225         /*
226          * We prefer to steal/stall ourselves and our users rather than stall the
227          * entire system. That may be a little unfair to our users, and
228          * even hurt high priority clients. The choice is whether to oomkill
229          * something else, or steal a context id.
230          */
231         ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
232         if (unlikely(ret < 0)) {
233                 ret = steal_hw_id(i915);
234                 if (ret < 0) /* once again for the correct errno code */
235                         ret = new_hw_id(i915, GFP_KERNEL);
236                 if (ret < 0)
237                         return ret;
238         }
239
240         *out = ret;
241         return 0;
242 }
243
244 static void release_hw_id(struct i915_gem_context *ctx)
245 {
246         struct drm_i915_private *i915 = ctx->i915;
247
248         if (list_empty(&ctx->hw_id_link))
249                 return;
250
251         mutex_lock(&i915->contexts.mutex);
252         if (!list_empty(&ctx->hw_id_link)) {
253                 ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
254                 list_del_init(&ctx->hw_id_link);
255         }
256         mutex_unlock(&i915->contexts.mutex);
257 }
258
259 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
260 {
261         while (count--) {
262                 if (!e->engines[count])
263                         continue;
264
265                 intel_context_put(e->engines[count]);
266         }
267         kfree(e);
268 }
269
270 static void free_engines(struct i915_gem_engines *e)
271 {
272         __free_engines(e, e->num_engines);
273 }
274
275 static void free_engines_rcu(struct rcu_head *rcu)
276 {
277         free_engines(container_of(rcu, struct i915_gem_engines, rcu));
278 }
279
280 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
281 {
282         const struct intel_gt *gt = &ctx->i915->gt;
283         struct intel_engine_cs *engine;
284         struct i915_gem_engines *e;
285         enum intel_engine_id id;
286
287         e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
288         if (!e)
289                 return ERR_PTR(-ENOMEM);
290
291         init_rcu_head(&e->rcu);
292         for_each_engine(engine, gt, id) {
293                 struct intel_context *ce;
294
295                 ce = intel_context_create(ctx, engine);
296                 if (IS_ERR(ce)) {
297                         __free_engines(e, id);
298                         return ERR_CAST(ce);
299                 }
300
301                 e->engines[id] = ce;
302                 e->num_engines = id + 1;
303         }
304
305         return e;
306 }
307
308 static void i915_gem_context_free(struct i915_gem_context *ctx)
309 {
310         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
311         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
312
313         release_hw_id(ctx);
314         if (ctx->vm)
315                 i915_vm_put(ctx->vm);
316
317         free_engines(rcu_access_pointer(ctx->engines));
318         mutex_destroy(&ctx->engines_mutex);
319
320         if (ctx->timeline)
321                 intel_timeline_put(ctx->timeline);
322
323         kfree(ctx->name);
324         put_pid(ctx->pid);
325
326         list_del(&ctx->link);
327         mutex_destroy(&ctx->mutex);
328
329         kfree_rcu(ctx, rcu);
330 }
331
332 static void contexts_free(struct drm_i915_private *i915)
333 {
334         struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
335         struct i915_gem_context *ctx, *cn;
336
337         lockdep_assert_held(&i915->drm.struct_mutex);
338
339         llist_for_each_entry_safe(ctx, cn, freed, free_link)
340                 i915_gem_context_free(ctx);
341 }
342
343 static void contexts_free_first(struct drm_i915_private *i915)
344 {
345         struct i915_gem_context *ctx;
346         struct llist_node *freed;
347
348         lockdep_assert_held(&i915->drm.struct_mutex);
349
350         freed = llist_del_first(&i915->contexts.free_list);
351         if (!freed)
352                 return;
353
354         ctx = container_of(freed, typeof(*ctx), free_link);
355         i915_gem_context_free(ctx);
356 }
357
358 static void contexts_free_worker(struct work_struct *work)
359 {
360         struct drm_i915_private *i915 =
361                 container_of(work, typeof(*i915), contexts.free_work);
362
363         mutex_lock(&i915->drm.struct_mutex);
364         contexts_free(i915);
365         mutex_unlock(&i915->drm.struct_mutex);
366 }
367
368 void i915_gem_context_release(struct kref *ref)
369 {
370         struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
371         struct drm_i915_private *i915 = ctx->i915;
372
373         trace_i915_context_free(ctx);
374         if (llist_add(&ctx->free_link, &i915->contexts.free_list))
375                 queue_work(i915->wq, &i915->contexts.free_work);
376 }
377
378 static void context_close(struct i915_gem_context *ctx)
379 {
380         mutex_lock(&ctx->mutex);
381
382         i915_gem_context_set_closed(ctx);
383         ctx->file_priv = ERR_PTR(-EBADF);
384
385         /*
386          * This context will never again be assigned to HW, so we can
387          * reuse its ID for the next context.
388          */
389         release_hw_id(ctx);
390
391         /*
392          * The LUT uses the VMA as a backpointer to unref the object,
393          * so we need to clear the LUT before we close all the VMA (inside
394          * the ppgtt).
395          */
396         lut_close(ctx);
397
398         mutex_unlock(&ctx->mutex);
399         i915_gem_context_put(ctx);
400 }
401
402 static struct i915_gem_context *
403 __create_context(struct drm_i915_private *i915)
404 {
405         struct i915_gem_context *ctx;
406         struct i915_gem_engines *e;
407         int err;
408         int i;
409
410         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
411         if (!ctx)
412                 return ERR_PTR(-ENOMEM);
413
414         kref_init(&ctx->ref);
415         list_add_tail(&ctx->link, &i915->contexts.list);
416         ctx->i915 = i915;
417         ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
418         mutex_init(&ctx->mutex);
419
420         mutex_init(&ctx->engines_mutex);
421         e = default_engines(ctx);
422         if (IS_ERR(e)) {
423                 err = PTR_ERR(e);
424                 goto err_free;
425         }
426         RCU_INIT_POINTER(ctx->engines, e);
427
428         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
429         INIT_LIST_HEAD(&ctx->hw_id_link);
430
431         /* NB: Mark all slices as needing a remap so that when the context first
432          * loads it will restore whatever remap state already exists. If there
433          * is no remap info, it will be a NOP. */
434         ctx->remap_slice = ALL_L3_SLICES(i915);
435
436         i915_gem_context_set_bannable(ctx);
437         i915_gem_context_set_recoverable(ctx);
438
439         ctx->ring_size = 4 * PAGE_SIZE;
440
441         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
442                 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
443
444         return ctx;
445
446 err_free:
447         kfree(ctx);
448         return ERR_PTR(err);
449 }
450
451 static struct i915_address_space *
452 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
453 {
454         struct i915_address_space *old = ctx->vm;
455         struct i915_gem_engines_iter it;
456         struct intel_context *ce;
457
458         GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
459
460         ctx->vm = i915_vm_get(vm);
461
462         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
463                 i915_vm_put(ce->vm);
464                 ce->vm = i915_vm_get(vm);
465         }
466         i915_gem_context_unlock_engines(ctx);
467
468         return old;
469 }
470
471 static void __assign_ppgtt(struct i915_gem_context *ctx,
472                            struct i915_address_space *vm)
473 {
474         if (vm == ctx->vm)
475                 return;
476
477         vm = __set_ppgtt(ctx, vm);
478         if (vm)
479                 i915_vm_put(vm);
480 }
481
482 static struct i915_gem_context *
483 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
484 {
485         struct i915_gem_context *ctx;
486
487         lockdep_assert_held(&dev_priv->drm.struct_mutex);
488
489         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
490             !HAS_EXECLISTS(dev_priv))
491                 return ERR_PTR(-EINVAL);
492
493         /* Reap the most stale context */
494         contexts_free_first(dev_priv);
495
496         ctx = __create_context(dev_priv);
497         if (IS_ERR(ctx))
498                 return ctx;
499
500         if (HAS_FULL_PPGTT(dev_priv)) {
501                 struct i915_ppgtt *ppgtt;
502
503                 ppgtt = i915_ppgtt_create(dev_priv);
504                 if (IS_ERR(ppgtt)) {
505                         DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
506                                          PTR_ERR(ppgtt));
507                         context_close(ctx);
508                         return ERR_CAST(ppgtt);
509                 }
510
511                 __assign_ppgtt(ctx, &ppgtt->vm);
512                 i915_vm_put(&ppgtt->vm);
513         }
514
515         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
516                 struct intel_timeline *timeline;
517
518                 timeline = intel_timeline_create(&dev_priv->gt, NULL);
519                 if (IS_ERR(timeline)) {
520                         context_close(ctx);
521                         return ERR_CAST(timeline);
522                 }
523
524                 ctx->timeline = timeline;
525         }
526
527         trace_i915_context_create(ctx);
528
529         return ctx;
530 }
531
532 /**
533  * i915_gem_context_create_gvt - create a GVT GEM context
534  * @dev: drm device *
535  *
536  * This function is used to create a GVT specific GEM context.
537  *
538  * Returns:
539  * pointer to i915_gem_context on success, error pointer if failed
540  *
541  */
542 struct i915_gem_context *
543 i915_gem_context_create_gvt(struct drm_device *dev)
544 {
545         struct i915_gem_context *ctx;
546         int ret;
547
548         if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
549                 return ERR_PTR(-ENODEV);
550
551         ret = i915_mutex_lock_interruptible(dev);
552         if (ret)
553                 return ERR_PTR(ret);
554
555         ctx = i915_gem_create_context(to_i915(dev), 0);
556         if (IS_ERR(ctx))
557                 goto out;
558
559         ret = i915_gem_context_pin_hw_id(ctx);
560         if (ret) {
561                 context_close(ctx);
562                 ctx = ERR_PTR(ret);
563                 goto out;
564         }
565
566         ctx->file_priv = ERR_PTR(-EBADF);
567         i915_gem_context_set_closed(ctx); /* not user accessible */
568         i915_gem_context_clear_bannable(ctx);
569         i915_gem_context_set_force_single_submission(ctx);
570         if (!USES_GUC_SUBMISSION(to_i915(dev)))
571                 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
572
573         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
574 out:
575         mutex_unlock(&dev->struct_mutex);
576         return ctx;
577 }
578
579 static void
580 destroy_kernel_context(struct i915_gem_context **ctxp)
581 {
582         struct i915_gem_context *ctx;
583
584         /* Keep the context ref so that we can free it immediately ourselves */
585         ctx = i915_gem_context_get(fetch_and_zero(ctxp));
586         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
587
588         context_close(ctx);
589         i915_gem_context_free(ctx);
590 }
591
592 struct i915_gem_context *
593 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
594 {
595         struct i915_gem_context *ctx;
596         int err;
597
598         ctx = i915_gem_create_context(i915, 0);
599         if (IS_ERR(ctx))
600                 return ctx;
601
602         err = i915_gem_context_pin_hw_id(ctx);
603         if (err) {
604                 destroy_kernel_context(&ctx);
605                 return ERR_PTR(err);
606         }
607
608         i915_gem_context_clear_bannable(ctx);
609         ctx->sched.priority = I915_USER_PRIORITY(prio);
610         ctx->ring_size = PAGE_SIZE;
611
612         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
613
614         return ctx;
615 }
616
617 static void init_contexts(struct drm_i915_private *i915)
618 {
619         mutex_init(&i915->contexts.mutex);
620         INIT_LIST_HEAD(&i915->contexts.list);
621
622         /* Using the simple ida interface, the max is limited by sizeof(int) */
623         BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
624         BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
625         ida_init(&i915->contexts.hw_ida);
626         INIT_LIST_HEAD(&i915->contexts.hw_id_list);
627
628         INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
629         init_llist_head(&i915->contexts.free_list);
630 }
631
632 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
633 {
634         struct i915_gem_context *ctx;
635
636         /* Reassure ourselves we are only called once */
637         GEM_BUG_ON(dev_priv->kernel_context);
638
639         init_contexts(dev_priv);
640
641         /* lowest priority; idle task */
642         ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
643         if (IS_ERR(ctx)) {
644                 DRM_ERROR("Failed to create default global context\n");
645                 return PTR_ERR(ctx);
646         }
647         /*
648          * For easy recognisability, we want the kernel context to be 0 and then
649          * all user contexts will have non-zero hw_id. Kernel contexts are
650          * permanently pinned, so that we never suffer a stall and can
651          * use them from any allocation context (e.g. for evicting other
652          * contexts and from inside the shrinker).
653          */
654         GEM_BUG_ON(ctx->hw_id);
655         GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
656         dev_priv->kernel_context = ctx;
657
658         DRM_DEBUG_DRIVER("%s context support initialized\n",
659                          DRIVER_CAPS(dev_priv)->has_logical_contexts ?
660                          "logical" : "fake");
661         return 0;
662 }
663
664 void i915_gem_contexts_fini(struct drm_i915_private *i915)
665 {
666         lockdep_assert_held(&i915->drm.struct_mutex);
667
668         destroy_kernel_context(&i915->kernel_context);
669
670         /* Must free all deferred contexts (via flush_workqueue) first */
671         GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
672         ida_destroy(&i915->contexts.hw_ida);
673 }
674
675 static int context_idr_cleanup(int id, void *p, void *data)
676 {
677         context_close(p);
678         return 0;
679 }
680
681 static int vm_idr_cleanup(int id, void *p, void *data)
682 {
683         i915_vm_put(p);
684         return 0;
685 }
686
687 static int gem_context_register(struct i915_gem_context *ctx,
688                                 struct drm_i915_file_private *fpriv)
689 {
690         int ret;
691
692         ctx->file_priv = fpriv;
693         if (ctx->vm)
694                 ctx->vm->file = fpriv;
695
696         ctx->pid = get_task_pid(current, PIDTYPE_PID);
697         ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
698                               current->comm, pid_nr(ctx->pid));
699         if (!ctx->name) {
700                 ret = -ENOMEM;
701                 goto err_pid;
702         }
703
704         /* And finally expose ourselves to userspace via the idr */
705         mutex_lock(&fpriv->context_idr_lock);
706         ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
707         mutex_unlock(&fpriv->context_idr_lock);
708         if (ret >= 0)
709                 goto out;
710
711         kfree(fetch_and_zero(&ctx->name));
712 err_pid:
713         put_pid(fetch_and_zero(&ctx->pid));
714 out:
715         return ret;
716 }
717
718 int i915_gem_context_open(struct drm_i915_private *i915,
719                           struct drm_file *file)
720 {
721         struct drm_i915_file_private *file_priv = file->driver_priv;
722         struct i915_gem_context *ctx;
723         int err;
724
725         mutex_init(&file_priv->context_idr_lock);
726         mutex_init(&file_priv->vm_idr_lock);
727
728         idr_init(&file_priv->context_idr);
729         idr_init_base(&file_priv->vm_idr, 1);
730
731         mutex_lock(&i915->drm.struct_mutex);
732         ctx = i915_gem_create_context(i915, 0);
733         mutex_unlock(&i915->drm.struct_mutex);
734         if (IS_ERR(ctx)) {
735                 err = PTR_ERR(ctx);
736                 goto err;
737         }
738
739         err = gem_context_register(ctx, file_priv);
740         if (err < 0)
741                 goto err_ctx;
742
743         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
744         GEM_BUG_ON(err > 0);
745
746         return 0;
747
748 err_ctx:
749         context_close(ctx);
750 err:
751         idr_destroy(&file_priv->vm_idr);
752         idr_destroy(&file_priv->context_idr);
753         mutex_destroy(&file_priv->vm_idr_lock);
754         mutex_destroy(&file_priv->context_idr_lock);
755         return err;
756 }
757
758 void i915_gem_context_close(struct drm_file *file)
759 {
760         struct drm_i915_file_private *file_priv = file->driver_priv;
761
762         idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
763         idr_destroy(&file_priv->context_idr);
764         mutex_destroy(&file_priv->context_idr_lock);
765
766         idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
767         idr_destroy(&file_priv->vm_idr);
768         mutex_destroy(&file_priv->vm_idr_lock);
769 }
770
771 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
772                              struct drm_file *file)
773 {
774         struct drm_i915_private *i915 = to_i915(dev);
775         struct drm_i915_gem_vm_control *args = data;
776         struct drm_i915_file_private *file_priv = file->driver_priv;
777         struct i915_ppgtt *ppgtt;
778         int err;
779
780         if (!HAS_FULL_PPGTT(i915))
781                 return -ENODEV;
782
783         if (args->flags)
784                 return -EINVAL;
785
786         ppgtt = i915_ppgtt_create(i915);
787         if (IS_ERR(ppgtt))
788                 return PTR_ERR(ppgtt);
789
790         ppgtt->vm.file = file_priv;
791
792         if (args->extensions) {
793                 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
794                                            NULL, 0,
795                                            ppgtt);
796                 if (err)
797                         goto err_put;
798         }
799
800         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
801         if (err)
802                 goto err_put;
803
804         err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
805         if (err < 0)
806                 goto err_unlock;
807
808         GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
809
810         mutex_unlock(&file_priv->vm_idr_lock);
811
812         args->vm_id = err;
813         return 0;
814
815 err_unlock:
816         mutex_unlock(&file_priv->vm_idr_lock);
817 err_put:
818         i915_vm_put(&ppgtt->vm);
819         return err;
820 }
821
822 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
823                               struct drm_file *file)
824 {
825         struct drm_i915_file_private *file_priv = file->driver_priv;
826         struct drm_i915_gem_vm_control *args = data;
827         struct i915_address_space *vm;
828         int err;
829         u32 id;
830
831         if (args->flags)
832                 return -EINVAL;
833
834         if (args->extensions)
835                 return -EINVAL;
836
837         id = args->vm_id;
838         if (!id)
839                 return -ENOENT;
840
841         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
842         if (err)
843                 return err;
844
845         vm = idr_remove(&file_priv->vm_idr, id);
846
847         mutex_unlock(&file_priv->vm_idr_lock);
848         if (!vm)
849                 return -ENOENT;
850
851         i915_vm_put(vm);
852         return 0;
853 }
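
/*
 * Illustrative userspace sketch (not part of the driver): how the two VM
 * ioctls above pair with I915_CONTEXT_PARAM_VM (handled by set_ppgtt()
 * further down) to give a context a private ppGTT. The helper name, the
 * minimal error handling and the origin of fd/ctx_id are assumptions for
 * the example.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int example_attach_new_vm(int fd, __u32 ctx_id)
 *	{
 *		struct drm_i915_gem_vm_control vm = {};
 *		struct drm_i915_gem_context_param arg = {};
 *
 *		// i915_gem_vm_create_ioctl(): allocate a full ppGTT and
 *		// return a nonzero handle in vm.vm_id.
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm))
 *			return -1;
 *
 *		// Point the context at the new address space.
 *		arg.ctx_id = ctx_id;
 *		arg.param = I915_CONTEXT_PARAM_VM;
 *		arg.value = vm.vm_id;
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg))
 *			return -1;
 *
 *		// i915_gem_vm_destroy_ioctl(): drop the file's handle; the
 *		// context keeps its own reference to the vm.
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
 *	}
 */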
854
855 struct context_barrier_task {
856         struct i915_active base;
857         void (*task)(void *data);
858         void *data;
859 };
860
861 static void cb_retire(struct i915_active *base)
862 {
863         struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
864
865         if (cb->task)
866                 cb->task(cb->data);
867
868         i915_active_fini(&cb->base);
869         kfree(cb);
870 }
871
872 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
873 static int context_barrier_task(struct i915_gem_context *ctx,
874                                 intel_engine_mask_t engines,
875                                 bool (*skip)(struct intel_context *ce, void *data),
876                                 int (*emit)(struct i915_request *rq, void *data),
877                                 void (*task)(void *data),
878                                 void *data)
879 {
880         struct drm_i915_private *i915 = ctx->i915;
881         struct context_barrier_task *cb;
882         struct i915_gem_engines_iter it;
883         struct intel_context *ce;
884         int err = 0;
885
886         lockdep_assert_held(&i915->drm.struct_mutex);
887         GEM_BUG_ON(!task);
888
889         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
890         if (!cb)
891                 return -ENOMEM;
892
893         i915_active_init(i915, &cb->base, NULL, cb_retire);
894         err = i915_active_acquire(&cb->base);
895         if (err) {
896                 kfree(cb);
897                 return err;
898         }
899
900         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
901                 struct i915_request *rq;
902
903                 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
904                                        ce->engine->mask)) {
905                         err = -ENXIO;
906                         break;
907                 }
908
909                 if (!(ce->engine->mask & engines))
910                         continue;
911
912                 if (skip && skip(ce, data))
913                         continue;
914
915                 rq = intel_context_create_request(ce);
916                 if (IS_ERR(rq)) {
917                         err = PTR_ERR(rq);
918                         break;
919                 }
920
921                 err = 0;
922                 if (emit)
923                         err = emit(rq, data);
924                 if (err == 0)
925                         err = i915_active_ref(&cb->base, rq->fence.context, rq);
926
927                 i915_request_add(rq);
928                 if (err)
929                         break;
930         }
931         i915_gem_context_unlock_engines(ctx);
932
933         cb->task = err ? NULL : task; /* caller needs to unwind instead */
934         cb->data = data;
935
936         i915_active_release(&cb->base);
937
938         return err;
939 }
940
941 static int get_ppgtt(struct drm_i915_file_private *file_priv,
942                      struct i915_gem_context *ctx,
943                      struct drm_i915_gem_context_param *args)
944 {
945         struct i915_address_space *vm;
946         int ret;
947
948         if (!ctx->vm)
949                 return -ENODEV;
950
951         /* XXX rcu acquire? */
952         ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
953         if (ret)
954                 return ret;
955
956         vm = i915_vm_get(ctx->vm);
957         mutex_unlock(&ctx->i915->drm.struct_mutex);
958
959         ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
960         if (ret)
961                 goto err_put;
962
963         ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
964         GEM_BUG_ON(!ret);
965         if (ret < 0)
966                 goto err_unlock;
967
968         i915_vm_get(vm);
969
970         args->size = 0;
971         args->value = ret;
972
973         ret = 0;
974 err_unlock:
975         mutex_unlock(&file_priv->vm_idr_lock);
976 err_put:
977         i915_vm_put(vm);
978         return ret;
979 }
980
981 static void set_ppgtt_barrier(void *data)
982 {
983         struct i915_address_space *old = data;
984
985         if (INTEL_GEN(old->i915) < 8)
986                 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
987
988         i915_vm_put(old);
989 }
990
991 static int emit_ppgtt_update(struct i915_request *rq, void *data)
992 {
993         struct i915_address_space *vm = rq->hw_context->vm;
994         struct intel_engine_cs *engine = rq->engine;
995         u32 base = engine->mmio_base;
996         u32 *cs;
997         int i;
998
999         if (i915_vm_is_4lvl(vm)) {
1000                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1001                 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1002
1003                 cs = intel_ring_begin(rq, 6);
1004                 if (IS_ERR(cs))
1005                         return PTR_ERR(cs);
1006
1007                 *cs++ = MI_LOAD_REGISTER_IMM(2);
1008
1009                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1010                 *cs++ = upper_32_bits(pd_daddr);
1011                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1012                 *cs++ = lower_32_bits(pd_daddr);
1013
1014                 *cs++ = MI_NOOP;
1015                 intel_ring_advance(rq, cs);
1016         } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1017                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1018
1019                 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1020                 if (IS_ERR(cs))
1021                         return PTR_ERR(cs);
1022
1023                 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1024                 for (i = GEN8_3LVL_PDPES; i--; ) {
1025                         const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1026
1027                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1028                         *cs++ = upper_32_bits(pd_daddr);
1029                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1030                         *cs++ = lower_32_bits(pd_daddr);
1031                 }
1032                 *cs++ = MI_NOOP;
1033                 intel_ring_advance(rq, cs);
1034         } else {
1035                 /* ppGTT is not part of the legacy context image */
1036                 gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
1037         }
1038
1039         return 0;
1040 }
1041
1042 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1043 {
1044         if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1045                 return !ce->state;
1046         else
1047                 return !atomic_read(&ce->pin_count);
1048 }
1049
1050 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1051                      struct i915_gem_context *ctx,
1052                      struct drm_i915_gem_context_param *args)
1053 {
1054         struct i915_address_space *vm, *old;
1055         int err;
1056
1057         if (args->size)
1058                 return -EINVAL;
1059
1060         if (!ctx->vm)
1061                 return -ENODEV;
1062
1063         if (upper_32_bits(args->value))
1064                 return -ENOENT;
1065
1066         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1067         if (err)
1068                 return err;
1069
1070         vm = idr_find(&file_priv->vm_idr, args->value);
1071         if (vm)
1072                 i915_vm_get(vm);
1073         mutex_unlock(&file_priv->vm_idr_lock);
1074         if (!vm)
1075                 return -ENOENT;
1076
1077         err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1078         if (err)
1079                 goto out;
1080
1081         if (vm == ctx->vm)
1082                 goto unlock;
1083
1084         /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1085         mutex_lock(&ctx->mutex);
1086         lut_close(ctx);
1087         mutex_unlock(&ctx->mutex);
1088
1089         old = __set_ppgtt(ctx, vm);
1090
1091         /*
1092          * We need to flush any requests using the current ppgtt before
1093          * we release it as the requests do not hold a reference themselves,
1094          * only indirectly through the context.
1095          */
1096         err = context_barrier_task(ctx, ALL_ENGINES,
1097                                    skip_ppgtt_update,
1098                                    emit_ppgtt_update,
1099                                    set_ppgtt_barrier,
1100                                    old);
1101         if (err) {
1102                 i915_vm_put(__set_ppgtt(ctx, old));
1103                 i915_vm_put(old);
1104         }
1105
1106 unlock:
1107         mutex_unlock(&ctx->i915->drm.struct_mutex);
1108
1109 out:
1110         i915_vm_put(vm);
1111         return err;
1112 }
1113
1114 static int gen8_emit_rpcs_config(struct i915_request *rq,
1115                                  struct intel_context *ce,
1116                                  struct intel_sseu sseu)
1117 {
1118         u64 offset;
1119         u32 *cs;
1120
1121         cs = intel_ring_begin(rq, 4);
1122         if (IS_ERR(cs))
1123                 return PTR_ERR(cs);
1124
1125         offset = i915_ggtt_offset(ce->state) +
1126                  LRC_STATE_PN * PAGE_SIZE +
1127                  (CTX_R_PWR_CLK_STATE + 1) * 4;
1128
1129         *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1130         *cs++ = lower_32_bits(offset);
1131         *cs++ = upper_32_bits(offset);
1132         *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1133
1134         intel_ring_advance(rq, cs);
1135
1136         return 0;
1137 }
1138
1139 static int
1140 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1141 {
1142         struct i915_request *rq;
1143         int ret;
1144
1145         lockdep_assert_held(&ce->pin_mutex);
1146
1147         /*
1148          * If the context is not idle, we have to submit an ordered request to
1149          * modify its context image via the kernel context (writing to our own
1150          * image, or into the registers directly, does not stick). Pristine
1151          * and idle contexts will be configured on pinning.
1152          */
1153         if (!intel_context_is_pinned(ce))
1154                 return 0;
1155
1156         rq = i915_request_create(ce->engine->kernel_context);
1157         if (IS_ERR(rq))
1158                 return PTR_ERR(rq);
1159
1160         /* Serialise with the remote context */
1161         ret = intel_context_prepare_remote_request(ce, rq);
1162         if (ret == 0)
1163                 ret = gen8_emit_rpcs_config(rq, ce, sseu);
1164
1165         i915_request_add(rq);
1166         return ret;
1167 }
1168
1169 static int
1170 __intel_context_reconfigure_sseu(struct intel_context *ce,
1171                                  struct intel_sseu sseu)
1172 {
1173         int ret;
1174
1175         GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
1176
1177         ret = intel_context_lock_pinned(ce);
1178         if (ret)
1179                 return ret;
1180
1181         /* Nothing to do if unmodified. */
1182         if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1183                 goto unlock;
1184
1185         ret = gen8_modify_rpcs(ce, sseu);
1186         if (!ret)
1187                 ce->sseu = sseu;
1188
1189 unlock:
1190         intel_context_unlock_pinned(ce);
1191         return ret;
1192 }
1193
1194 static int
1195 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1196 {
1197         struct drm_i915_private *i915 = ce->engine->i915;
1198         int ret;
1199
1200         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1201         if (ret)
1202                 return ret;
1203
1204         ret = __intel_context_reconfigure_sseu(ce, sseu);
1205
1206         mutex_unlock(&i915->drm.struct_mutex);
1207
1208         return ret;
1209 }
1210
1211 static int
1212 user_to_context_sseu(struct drm_i915_private *i915,
1213                      const struct drm_i915_gem_context_param_sseu *user,
1214                      struct intel_sseu *context)
1215 {
1216         const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1217
1218         /* No zeros in any field. */
1219         if (!user->slice_mask || !user->subslice_mask ||
1220             !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1221                 return -EINVAL;
1222
1223         /* Max > min. */
1224         if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1225                 return -EINVAL;
1226
1227         /*
1228          * Some future proofing on the types since the uAPI is wider than the
1229          * current internal implementation.
1230          */
1231         if (overflows_type(user->slice_mask, context->slice_mask) ||
1232             overflows_type(user->subslice_mask, context->subslice_mask) ||
1233             overflows_type(user->min_eus_per_subslice,
1234                            context->min_eus_per_subslice) ||
1235             overflows_type(user->max_eus_per_subslice,
1236                            context->max_eus_per_subslice))
1237                 return -EINVAL;
1238
1239         /* Check validity against hardware. */
1240         if (user->slice_mask & ~device->slice_mask)
1241                 return -EINVAL;
1242
1243         if (user->subslice_mask & ~device->subslice_mask[0])
1244                 return -EINVAL;
1245
1246         if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1247                 return -EINVAL;
1248
1249         context->slice_mask = user->slice_mask;
1250         context->subslice_mask = user->subslice_mask;
1251         context->min_eus_per_subslice = user->min_eus_per_subslice;
1252         context->max_eus_per_subslice = user->max_eus_per_subslice;
1253
1254         /* Part specific restrictions. */
1255         if (IS_GEN(i915, 11)) {
1256                 unsigned int hw_s = hweight8(device->slice_mask);
1257                 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1258                 unsigned int req_s = hweight8(context->slice_mask);
1259                 unsigned int req_ss = hweight8(context->subslice_mask);
1260
1261                 /*
1262                  * Only full subslice enablement is possible if more than one
1263                  * slice is turned on.
1264                  */
1265                 if (req_s > 1 && req_ss != hw_ss_per_s)
1266                         return -EINVAL;
1267
1268                 /*
1269                  * If more than four (SScount bitfield limit) subslices are
1270                  * requested then the number has to be even.
1271                  */
1272                 if (req_ss > 4 && (req_ss & 1))
1273                         return -EINVAL;
1274
1275                 /*
1276                  * If only one slice is enabled and subslice count is below the
1277                  * device full enablement, it must be at most half of all the
1278                  * available subslices.
1279                  */
1280                 if (req_s == 1 && req_ss < hw_ss_per_s &&
1281                     req_ss > (hw_ss_per_s / 2))
1282                         return -EINVAL;
1283
1284                 /* ABI restriction - VME use case only. */
1285
1286                 /* All slices or one slice only. */
1287                 if (req_s != 1 && req_s != hw_s)
1288                         return -EINVAL;
1289
1290                 /*
1291                  * Half subslices or full enablement only when one slice is
1292                  * enabled.
1293                  */
1294                 if (req_s == 1 &&
1295                     (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1296                         return -EINVAL;
1297
1298                 /* No EU configuration changes. */
1299                 if ((user->min_eus_per_subslice !=
1300                      device->max_eus_per_subslice) ||
1301                     (user->max_eus_per_subslice !=
1302                      device->max_eus_per_subslice))
1303                         return -EINVAL;
1304         }
1305
1306         return 0;
1307 }
1308
1309 static int set_sseu(struct i915_gem_context *ctx,
1310                     struct drm_i915_gem_context_param *args)
1311 {
1312         struct drm_i915_private *i915 = ctx->i915;
1313         struct drm_i915_gem_context_param_sseu user_sseu;
1314         struct intel_context *ce;
1315         struct intel_sseu sseu;
1316         unsigned long lookup;
1317         int ret;
1318
1319         if (args->size < sizeof(user_sseu))
1320                 return -EINVAL;
1321
1322         if (!IS_GEN(i915, 11))
1323                 return -ENODEV;
1324
1325         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1326                            sizeof(user_sseu)))
1327                 return -EFAULT;
1328
1329         if (user_sseu.rsvd)
1330                 return -EINVAL;
1331
1332         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1333                 return -EINVAL;
1334
1335         lookup = 0;
1336         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1337                 lookup |= LOOKUP_USER_INDEX;
1338
1339         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1340         if (IS_ERR(ce))
1341                 return PTR_ERR(ce);
1342
1343         /* Only render engine supports RPCS configuration. */
1344         if (ce->engine->class != RENDER_CLASS) {
1345                 ret = -ENODEV;
1346                 goto out_ce;
1347         }
1348
1349         ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1350         if (ret)
1351                 goto out_ce;
1352
1353         ret = intel_context_reconfigure_sseu(ce, sseu);
1354         if (ret)
1355                 goto out_ce;
1356
1357         args->size = sizeof(user_sseu);
1358
1359 out_ce:
1360         intel_context_put(ce);
1361         return ret;
1362 }
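
/*
 * Illustrative userspace sketch (not part of the driver): driving set_sseu()
 * above via DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM. The mask and EU values are
 * placeholders and must satisfy the gen11 rules checked in
 * user_to_context_sseu(); the helper name and fd/ctx_id are assumptions.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int example_set_render_sseu(int fd, __u32 ctx_id)
 *	{
 *		struct drm_i915_gem_context_param_sseu sseu = {
 *			.engine = {
 *				.engine_class = I915_ENGINE_CLASS_RENDER,
 *				.engine_instance = 0,
 *			},
 *			.slice_mask = 0x1,	// placeholder masks
 *			.subslice_mask = 0xff,
 *			.min_eus_per_subslice = 8,
 *			.max_eus_per_subslice = 8,
 *		};
 *		struct drm_i915_gem_context_param arg = {
 *			.ctx_id = ctx_id,
 *			.param = I915_CONTEXT_PARAM_SSEU,
 *			.size = sizeof(sseu),
 *			.value = (__u64)(uintptr_t)&sseu,
 *		};
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 *	}
 */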
1363
1364 struct set_engines {
1365         struct i915_gem_context *ctx;
1366         struct i915_gem_engines *engines;
1367 };
1368
1369 static int
1370 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1371 {
1372         struct i915_context_engines_load_balance __user *ext =
1373                 container_of_user(base, typeof(*ext), base);
1374         const struct set_engines *set = data;
1375         struct intel_engine_cs *stack[16];
1376         struct intel_engine_cs **siblings;
1377         struct intel_context *ce;
1378         u16 num_siblings, idx;
1379         unsigned int n;
1380         int err;
1381
1382         if (!HAS_EXECLISTS(set->ctx->i915))
1383                 return -ENODEV;
1384
1385         if (USES_GUC_SUBMISSION(set->ctx->i915))
1386                 return -ENODEV; /* not implemented yet */
1387
1388         if (get_user(idx, &ext->engine_index))
1389                 return -EFAULT;
1390
1391         if (idx >= set->engines->num_engines) {
1392                 DRM_DEBUG("Invalid placement value, %d >= %d\n",
1393                           idx, set->engines->num_engines);
1394                 return -EINVAL;
1395         }
1396
1397         idx = array_index_nospec(idx, set->engines->num_engines);
1398         if (set->engines->engines[idx]) {
1399                 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1400                 return -EEXIST;
1401         }
1402
1403         if (get_user(num_siblings, &ext->num_siblings))
1404                 return -EFAULT;
1405
1406         err = check_user_mbz(&ext->flags);
1407         if (err)
1408                 return err;
1409
1410         err = check_user_mbz(&ext->mbz64);
1411         if (err)
1412                 return err;
1413
1414         siblings = stack;
1415         if (num_siblings > ARRAY_SIZE(stack)) {
1416                 siblings = kmalloc_array(num_siblings,
1417                                          sizeof(*siblings),
1418                                          GFP_KERNEL);
1419                 if (!siblings)
1420                         return -ENOMEM;
1421         }
1422
1423         for (n = 0; n < num_siblings; n++) {
1424                 struct i915_engine_class_instance ci;
1425
1426                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1427                         err = -EFAULT;
1428                         goto out_siblings;
1429                 }
1430
1431                 siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1432                                                        ci.engine_class,
1433                                                        ci.engine_instance);
1434                 if (!siblings[n]) {
1435                         DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1436                                   n, ci.engine_class, ci.engine_instance);
1437                         err = -EINVAL;
1438                         goto out_siblings;
1439                 }
1440         }
1441
1442         ce = intel_execlists_create_virtual(set->ctx, siblings, n);
1443         if (IS_ERR(ce)) {
1444                 err = PTR_ERR(ce);
1445                 goto out_siblings;
1446         }
1447
1448         if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1449                 intel_context_put(ce);
1450                 err = -EEXIST;
1451                 goto out_siblings;
1452         }
1453
1454 out_siblings:
1455         if (siblings != stack)
1456                 kfree(siblings);
1457
1458         return err;
1459 }
1460
1461 static int
1462 set_engines__bond(struct i915_user_extension __user *base, void *data)
1463 {
1464         struct i915_context_engines_bond __user *ext =
1465                 container_of_user(base, typeof(*ext), base);
1466         const struct set_engines *set = data;
1467         struct i915_engine_class_instance ci;
1468         struct intel_engine_cs *virtual;
1469         struct intel_engine_cs *master;
1470         u16 idx, num_bonds;
1471         int err, n;
1472
1473         if (get_user(idx, &ext->virtual_index))
1474                 return -EFAULT;
1475
1476         if (idx >= set->engines->num_engines) {
1477                 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1478                           idx, set->engines->num_engines);
1479                 return -EINVAL;
1480         }
1481
1482         idx = array_index_nospec(idx, set->engines->num_engines);
1483         if (!set->engines->engines[idx]) {
1484                 DRM_DEBUG("Invalid engine at %d\n", idx);
1485                 return -EINVAL;
1486         }
1487         virtual = set->engines->engines[idx]->engine;
1488
1489         err = check_user_mbz(&ext->flags);
1490         if (err)
1491                 return err;
1492
1493         for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1494                 err = check_user_mbz(&ext->mbz64[n]);
1495                 if (err)
1496                         return err;
1497         }
1498
1499         if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1500                 return -EFAULT;
1501
1502         master = intel_engine_lookup_user(set->ctx->i915,
1503                                           ci.engine_class, ci.engine_instance);
1504         if (!master) {
1505                 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1506                           ci.engine_class, ci.engine_instance);
1507                 return -EINVAL;
1508         }
1509
1510         if (get_user(num_bonds, &ext->num_bonds))
1511                 return -EFAULT;
1512
1513         for (n = 0; n < num_bonds; n++) {
1514                 struct intel_engine_cs *bond;
1515
1516                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1517                         return -EFAULT;
1518
1519                 bond = intel_engine_lookup_user(set->ctx->i915,
1520                                                 ci.engine_class,
1521                                                 ci.engine_instance);
1522                 if (!bond) {
1523                         DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1524                                   n, ci.engine_class, ci.engine_instance);
1525                         return -EINVAL;
1526                 }
1527
1528                 /*
1529                  * A non-virtual engine has no siblings to choose between; and
1530                  * a submit fence will always be directed to the one engine.
1531                  */
1532                 if (intel_engine_is_virtual(virtual)) {
1533                         err = intel_virtual_engine_attach_bond(virtual,
1534                                                                master,
1535                                                                bond);
1536                         if (err)
1537                                 return err;
1538                 }
1539         }
1540
1541         return 0;
1542 }
1543
1544 static const i915_user_extension_fn set_engines__extensions[] = {
1545         [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1546         [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1547 };
1548
1549 static int
1550 set_engines(struct i915_gem_context *ctx,
1551             const struct drm_i915_gem_context_param *args)
1552 {
1553         struct i915_context_param_engines __user *user =
1554                 u64_to_user_ptr(args->value);
1555         struct set_engines set = { .ctx = ctx };
1556         unsigned int num_engines, n;
1557         u64 extensions;
1558         int err;
1559
1560         if (!args->size) { /* switch back to legacy user_ring_map */
1561                 if (!i915_gem_context_user_engines(ctx))
1562                         return 0;
1563
1564                 set.engines = default_engines(ctx);
1565                 if (IS_ERR(set.engines))
1566                         return PTR_ERR(set.engines);
1567
1568                 goto replace;
1569         }
1570
1571         BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1572         if (args->size < sizeof(*user) ||
1573             !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1574                 DRM_DEBUG("Invalid size for engine array: %d\n",
1575                           args->size);
1576                 return -EINVAL;
1577         }
1578
1579         /*
1580          * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1581          * first 64 engines defined here.
1582          */
1583         num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1584
1585         set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1586                               GFP_KERNEL);
1587         if (!set.engines)
1588                 return -ENOMEM;
1589
1590         init_rcu_head(&set.engines->rcu);
1591         for (n = 0; n < num_engines; n++) {
1592                 struct i915_engine_class_instance ci;
1593                 struct intel_engine_cs *engine;
1594
1595                 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1596                         __free_engines(set.engines, n);
1597                         return -EFAULT;
1598                 }
1599
1600                 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1601                     ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1602                         set.engines->engines[n] = NULL;
1603                         continue;
1604                 }
1605
1606                 engine = intel_engine_lookup_user(ctx->i915,
1607                                                   ci.engine_class,
1608                                                   ci.engine_instance);
1609                 if (!engine) {
1610                         DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1611                                   n, ci.engine_class, ci.engine_instance);
1612                         __free_engines(set.engines, n);
1613                         return -ENOENT;
1614                 }
1615
1616                 set.engines->engines[n] = intel_context_create(ctx, engine);
1617                 if (!set.engines->engines[n]) {
1618                         __free_engines(set.engines, n);
1619                         return -ENOMEM;
1620                 }
1621         }
1622         set.engines->num_engines = num_engines;
1623
1624         err = -EFAULT;
1625         if (!get_user(extensions, &user->extensions))
1626                 err = i915_user_extensions(u64_to_user_ptr(extensions),
1627                                            set_engines__extensions,
1628                                            ARRAY_SIZE(set_engines__extensions),
1629                                            &set);
1630         if (err) {
1631                 free_engines(set.engines);
1632                 return err;
1633         }
1634
1635 replace:
1636         mutex_lock(&ctx->engines_mutex);
1637         if (args->size)
1638                 i915_gem_context_set_user_engines(ctx);
1639         else
1640                 i915_gem_context_clear_user_engines(ctx);
1641         rcu_swap_protected(ctx->engines, set.engines, 1);
1642         mutex_unlock(&ctx->engines_mutex);
1643
1644         call_rcu(&set.engines->rcu, free_engines_rcu);
1645
1646         return 0;
1647 }
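/*
 * For reference, set_engines() above services the I915_CONTEXT_PARAM_ENGINES
 * setparam. A minimal userspace sketch of populating a two-slot engine map
 * (rcs0 and bcs0) might look like the following; the helper name and the
 * open DRM fd `fd` / context id `ctx_id` are assumptions, not part of this
 * file:
 *
 *        #include <stdint.h>
 *        #include <sys/ioctl.h>
 *        #include <drm/i915_drm.h>
 *
 *        static int set_two_engine_map(int fd, uint32_t ctx_id)
 *        {
 *                I915_DEFINE_CONTEXT_PARAM_ENGINES(engine_map, 2) = {
 *                        .engines = {
 *                                { .engine_class = I915_ENGINE_CLASS_RENDER,
 *                                  .engine_instance = 0 },
 *                                { .engine_class = I915_ENGINE_CLASS_COPY,
 *                                  .engine_instance = 0 },
 *                        },
 *                };
 *                struct drm_i915_gem_context_param p = {
 *                        .ctx_id = ctx_id,
 *                        .param = I915_CONTEXT_PARAM_ENGINES,
 *                        .size = sizeof(engine_map),
 *                        .value = (uintptr_t)&engine_map,
 *                };
 *
 *                return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *        }
 *
 * Calling the ioctl with args->size == 0 instead restores the legacy ring
 * map, as handled at the top of set_engines().
 */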
1648
1649 static struct i915_gem_engines *
1650 __copy_engines(struct i915_gem_engines *e)
1651 {
1652         struct i915_gem_engines *copy;
1653         unsigned int n;
1654
1655         copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1656         if (!copy)
1657                 return ERR_PTR(-ENOMEM);
1658
1659         init_rcu_head(&copy->rcu);
1660         for (n = 0; n < e->num_engines; n++) {
1661                 if (e->engines[n])
1662                         copy->engines[n] = intel_context_get(e->engines[n]);
1663                 else
1664                         copy->engines[n] = NULL;
1665         }
1666         copy->num_engines = n;
1667
1668         return copy;
1669 }
1670
1671 static int
1672 get_engines(struct i915_gem_context *ctx,
1673             struct drm_i915_gem_context_param *args)
1674 {
1675         struct i915_context_param_engines __user *user;
1676         struct i915_gem_engines *e;
1677         size_t n, count, size;
1678         int err = 0;
1679
1680         err = mutex_lock_interruptible(&ctx->engines_mutex);
1681         if (err)
1682                 return err;
1683
1684         e = NULL;
1685         if (i915_gem_context_user_engines(ctx))
1686                 e = __copy_engines(i915_gem_context_engines(ctx));
1687         mutex_unlock(&ctx->engines_mutex);
1688         if (IS_ERR_OR_NULL(e)) {
1689                 args->size = 0;
1690                 return PTR_ERR_OR_ZERO(e);
1691         }
1692
1693         count = e->num_engines;
1694
1695         /* Be paranoid in case we have an impedance mismatch */
1696         if (!check_struct_size(user, engines, count, &size)) {
1697                 err = -EINVAL;
1698                 goto err_free;
1699         }
1700         if (overflows_type(size, args->size)) {
1701                 err = -EINVAL;
1702                 goto err_free;
1703         }
1704
1705         if (!args->size) {
1706                 args->size = size;
1707                 goto err_free;
1708         }
1709
1710         if (args->size < size) {
1711                 err = -EINVAL;
1712                 goto err_free;
1713         }
1714
1715         user = u64_to_user_ptr(args->value);
1716         if (!access_ok(user, size)) {
1717                 err = -EFAULT;
1718                 goto err_free;
1719         }
1720
1721         if (put_user(0, &user->extensions)) {
1722                 err = -EFAULT;
1723                 goto err_free;
1724         }
1725
1726         for (n = 0; n < count; n++) {
1727                 struct i915_engine_class_instance ci = {
1728                         .engine_class = I915_ENGINE_CLASS_INVALID,
1729                         .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1730                 };
1731
1732                 if (e->engines[n]) {
1733                         ci.engine_class = e->engines[n]->engine->uabi_class;
1734                         ci.engine_instance = e->engines[n]->engine->uabi_instance;
1735                 }
1736
1737                 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1738                         err = -EFAULT;
1739                         goto err_free;
1740                 }
1741         }
1742
1743         args->size = size;
1744
1745 err_free:
1746         free_engines(e);
1747         return err;
1748 }
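/*
 * get_engines() above is the read side of the same parameter and is meant
 * to be used as a two-pass query: once with size == 0 to learn the buffer
 * size, then again with a buffer of that size. A rough userspace sketch
 * (fd/ctx_id and the helper name are assumptions, error handling trimmed):
 *
 *        #include <stdint.h>
 *        #include <stdlib.h>
 *        #include <sys/ioctl.h>
 *        #include <drm/i915_drm.h>
 *
 *        static struct i915_context_param_engines *
 *        query_engine_map(int fd, uint32_t ctx_id, uint32_t *size_out)
 *        {
 *                struct drm_i915_gem_context_param p = {
 *                        .ctx_id = ctx_id,
 *                        .param = I915_CONTEXT_PARAM_ENGINES,
 *                        .size = 0,              // first pass: size query only
 *                };
 *                struct i915_context_param_engines *map;
 *
 *                if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
 *                        return NULL;
 *                if (!p.size)                    // legacy ring map in use
 *                        return NULL;
 *
 *                map = calloc(1, p.size);
 *                if (!map)
 *                        return NULL;
 *
 *                p.value = (uintptr_t)map;       // second pass: fill the array
 *                if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p)) {
 *                        free(map);
 *                        return NULL;
 *                }
 *
 *                *size_out = p.size;
 *                return map;     // empty slots read back as CLASS_INVALID
 *        }
 */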
1749
1750 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1751                         struct i915_gem_context *ctx,
1752                         struct drm_i915_gem_context_param *args)
1753 {
1754         int ret = 0;
1755
1756         switch (args->param) {
1757         case I915_CONTEXT_PARAM_NO_ZEROMAP:
1758                 if (args->size)
1759                         ret = -EINVAL;
1760                 else if (args->value)
1761                         set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1762                 else
1763                         clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1764                 break;
1765
1766         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1767                 if (args->size)
1768                         ret = -EINVAL;
1769                 else if (args->value)
1770                         i915_gem_context_set_no_error_capture(ctx);
1771                 else
1772                         i915_gem_context_clear_no_error_capture(ctx);
1773                 break;
1774
1775         case I915_CONTEXT_PARAM_BANNABLE:
1776                 if (args->size)
1777                         ret = -EINVAL;
1778                 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1779                         ret = -EPERM;
1780                 else if (args->value)
1781                         i915_gem_context_set_bannable(ctx);
1782                 else
1783                         i915_gem_context_clear_bannable(ctx);
1784                 break;
1785
1786         case I915_CONTEXT_PARAM_RECOVERABLE:
1787                 if (args->size)
1788                         ret = -EINVAL;
1789                 else if (args->value)
1790                         i915_gem_context_set_recoverable(ctx);
1791                 else
1792                         i915_gem_context_clear_recoverable(ctx);
1793                 break;
1794
1795         case I915_CONTEXT_PARAM_PRIORITY:
1796                 {
1797                         s64 priority = args->value;
1798
1799                         if (args->size)
1800                                 ret = -EINVAL;
1801                         else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1802                                 ret = -ENODEV;
1803                         else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1804                                  priority < I915_CONTEXT_MIN_USER_PRIORITY)
1805                                 ret = -EINVAL;
1806                         else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1807                                  !capable(CAP_SYS_NICE))
1808                                 ret = -EPERM;
1809                         else
1810                                 ctx->sched.priority =
1811                                         I915_USER_PRIORITY(priority);
1812                 }
1813                 break;
1814
1815         case I915_CONTEXT_PARAM_SSEU:
1816                 ret = set_sseu(ctx, args);
1817                 break;
1818
1819         case I915_CONTEXT_PARAM_VM:
1820                 ret = set_ppgtt(fpriv, ctx, args);
1821                 break;
1822
1823         case I915_CONTEXT_PARAM_ENGINES:
1824                 ret = set_engines(ctx, args);
1825                 break;
1826
1827         case I915_CONTEXT_PARAM_BAN_PERIOD:
1828         default:
1829                 ret = -EINVAL;
1830                 break;
1831         }
1832
1833         return ret;
1834 }
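/*
 * Most parameters handled by ctx_setparam() are plain value writes with
 * args->size == 0. As one hedged example, adjusting a context's scheduling
 * priority from userspace could look roughly like this (fd/ctx_id and the
 * helper name are assumptions; raising the priority above the default
 * requires CAP_SYS_NICE, as enforced above):
 *
 *        #include <stdint.h>
 *        #include <sys/ioctl.h>
 *        #include <drm/i915_drm.h>
 *
 *        static int set_ctx_priority(int fd, uint32_t ctx_id, int prio)
 *        {
 *                struct drm_i915_gem_context_param p = {
 *                        .ctx_id = ctx_id,
 *                        .param = I915_CONTEXT_PARAM_PRIORITY,
 *                        .size = 0,      // must be zero for this param
 *                        .value = prio,  // -1023 (min) .. 1023 (max)
 *                };
 *
 *                return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *        }
 */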
1835
1836 struct create_ext {
1837         struct i915_gem_context *ctx;
1838         struct drm_i915_file_private *fpriv;
1839 };
1840
1841 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1842 {
1843         struct drm_i915_gem_context_create_ext_setparam local;
1844         const struct create_ext *arg = data;
1845
1846         if (copy_from_user(&local, ext, sizeof(local)))
1847                 return -EFAULT;
1848
1849         if (local.param.ctx_id)
1850                 return -EINVAL;
1851
1852         return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1853 }
1854
1855 static int clone_engines(struct i915_gem_context *dst,
1856                          struct i915_gem_context *src)
1857 {
1858         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1859         struct i915_gem_engines *clone;
1860         bool user_engines;
1861         unsigned long n;
1862
1863         clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1864         if (!clone)
1865                 goto err_unlock;
1866
1867         init_rcu_head(&clone->rcu);
1868         for (n = 0; n < e->num_engines; n++) {
1869                 struct intel_engine_cs *engine;
1870
1871                 if (!e->engines[n]) {
1872                         clone->engines[n] = NULL;
1873                         continue;
1874                 }
1875                 engine = e->engines[n]->engine;
1876
1877                 /*
1878                  * Virtual engines are singletons; they can only exist
1879                  * inside a single context, because they embed their
1880                  * HW context... As each virtual context implies a single
1881                  * timeline (each engine can only dequeue a single request
1882                  * at any time), it would be surprising for two contexts
1883                  * to use the same engine. So let's create a copy of
1884                  * the virtual engine instead.
1885                  */
1886                 if (intel_engine_is_virtual(engine))
1887                         clone->engines[n] =
1888                                 intel_execlists_clone_virtual(dst, engine);
1889                 else
1890                         clone->engines[n] = intel_context_create(dst, engine);
1891                 if (IS_ERR_OR_NULL(clone->engines[n])) {
1892                         __free_engines(clone, n);
1893                         goto err_unlock;
1894                 }
1895         }
1896         clone->num_engines = n;
1897
1898         user_engines = i915_gem_context_user_engines(src);
1899         i915_gem_context_unlock_engines(src);
1900
1901         free_engines(dst->engines);
1902         RCU_INIT_POINTER(dst->engines, clone);
1903         if (user_engines)
1904                 i915_gem_context_set_user_engines(dst);
1905         else
1906                 i915_gem_context_clear_user_engines(dst);
1907         return 0;
1908
1909 err_unlock:
1910         i915_gem_context_unlock_engines(src);
1911         return -ENOMEM;
1912 }
1913
1914 static int clone_flags(struct i915_gem_context *dst,
1915                        struct i915_gem_context *src)
1916 {
1917         dst->user_flags = src->user_flags;
1918         return 0;
1919 }
1920
1921 static int clone_schedattr(struct i915_gem_context *dst,
1922                            struct i915_gem_context *src)
1923 {
1924         dst->sched = src->sched;
1925         return 0;
1926 }
1927
1928 static int clone_sseu(struct i915_gem_context *dst,
1929                       struct i915_gem_context *src)
1930 {
1931         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1932         struct i915_gem_engines *clone;
1933         unsigned long n;
1934         int err;
1935
1936         clone = dst->engines; /* no locking required; sole access */
1937         if (e->num_engines != clone->num_engines) {
1938                 err = -EINVAL;
1939                 goto unlock;
1940         }
1941
1942         for (n = 0; n < e->num_engines; n++) {
1943                 struct intel_context *ce = e->engines[n];
1944
1945                 if (clone->engines[n]->engine->class != ce->engine->class) {
1946                         /* Must have compatible engine maps! */
1947                         err = -EINVAL;
1948                         goto unlock;
1949                 }
1950
1951                 /* serialises with set_sseu */
1952                 err = intel_context_lock_pinned(ce);
1953                 if (err)
1954                         goto unlock;
1955
1956                 clone->engines[n]->sseu = ce->sseu;
1957                 intel_context_unlock_pinned(ce);
1958         }
1959
1960         err = 0;
1961 unlock:
1962         i915_gem_context_unlock_engines(src);
1963         return err;
1964 }
1965
1966 static int clone_timeline(struct i915_gem_context *dst,
1967                           struct i915_gem_context *src)
1968 {
1969         if (src->timeline) {
1970                 GEM_BUG_ON(src->timeline == dst->timeline);
1971
1972                 if (dst->timeline)
1973                         intel_timeline_put(dst->timeline);
1974                 dst->timeline = intel_timeline_get(src->timeline);
1975         }
1976
1977         return 0;
1978 }
1979
1980 static int clone_vm(struct i915_gem_context *dst,
1981                     struct i915_gem_context *src)
1982 {
1983         struct i915_address_space *vm;
1984
1985         rcu_read_lock();
1986         do {
1987                 vm = READ_ONCE(src->vm);
1988                 if (!vm)
1989                         break;
1990
1991                 if (!kref_get_unless_zero(&vm->ref))
1992                         continue;
1993
1994                 /*
1995                  * This ppgtt may have been reallocated between
1996                  * the read and the kref, and reassigned to a third
1997                  * context. In order to avoid inadvertent sharing
1998                  * of this ppgtt with that third context (and not
1999                  * src), we have to confirm that we have the same
2000                  * ppgtt after passing through the strong memory
2001                  * barrier implied by a successful
2002                  * kref_get_unless_zero().
2003                  *
2004                  * Once we have acquired the current ppgtt of src,
2005                  * we no longer care if it is released from src, as
2006                  * it cannot be reallocated elsewhere.
2007                  */
2008
2009                 if (vm == READ_ONCE(src->vm))
2010                         break;
2011
2012                 i915_vm_put(vm);
2013         } while (1);
2014         rcu_read_unlock();
2015
2016         if (vm) {
2017                 __assign_ppgtt(dst, vm);
2018                 i915_vm_put(vm);
2019         }
2020
2021         return 0;
2022 }
2023
2024 static int create_clone(struct i915_user_extension __user *ext, void *data)
2025 {
2026         static int (* const fn[])(struct i915_gem_context *dst,
2027                                   struct i915_gem_context *src) = {
2028 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2029                 MAP(ENGINES, clone_engines),
2030                 MAP(FLAGS, clone_flags),
2031                 MAP(SCHEDATTR, clone_schedattr),
2032                 MAP(SSEU, clone_sseu),
2033                 MAP(TIMELINE, clone_timeline),
2034                 MAP(VM, clone_vm),
2035 #undef MAP
2036         };
2037         struct drm_i915_gem_context_create_ext_clone local;
2038         const struct create_ext *arg = data;
2039         struct i915_gem_context *dst = arg->ctx;
2040         struct i915_gem_context *src;
2041         int err, bit;
2042
2043         if (copy_from_user(&local, ext, sizeof(local)))
2044                 return -EFAULT;
2045
2046         BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2047                      I915_CONTEXT_CLONE_UNKNOWN);
2048
2049         if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2050                 return -EINVAL;
2051
2052         if (local.rsvd)
2053                 return -EINVAL;
2054
2055         rcu_read_lock();
2056         src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2057         rcu_read_unlock();
2058         if (!src)
2059                 return -ENOENT;
2060
2061         GEM_BUG_ON(src == dst);
2062
2063         for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2064                 if (!(local.flags & BIT(bit)))
2065                         continue;
2066
2067                 err = fn[bit](dst, src);
2068                 if (err)
2069                         return err;
2070         }
2071
2072         return 0;
2073 }
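/*
 * create_clone() is reached via the context-create ioctl below when the
 * I915_CONTEXT_CREATE_EXT_CLONE extension is chained in. A rough userspace
 * sketch for creating a new context that clones an existing context's
 * engine map and VM (fd/parent_id and the helper name are assumptions):
 *
 *        #include <stdint.h>
 *        #include <sys/ioctl.h>
 *        #include <drm/i915_drm.h>
 *
 *        static int create_cloned_ctx(int fd, uint32_t parent_id,
 *                                     uint32_t *ctx_id)
 *        {
 *                struct drm_i915_gem_context_create_ext_clone clone = {
 *                        .base.name = I915_CONTEXT_CREATE_EXT_CLONE,
 *                        .clone_id = parent_id,
 *                        .flags = I915_CONTEXT_CLONE_ENGINES |
 *                                 I915_CONTEXT_CLONE_VM,
 *                };
 *                struct drm_i915_gem_context_create_ext create = {
 *                        .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *                        .extensions = (uintptr_t)&clone,
 *                };
 *
 *                if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
 *                        return -1;
 *
 *                *ctx_id = create.ctx_id;
 *                return 0;
 *        }
 *
 * Unknown clone flags or a non-zero rsvd field are rejected with -EINVAL,
 * as checked above.
 */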
2074
2075 static const i915_user_extension_fn create_extensions[] = {
2076         [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2077         [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2078 };
2079
2080 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2081 {
2082         return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2083 }
2084
2085 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2086                                   struct drm_file *file)
2087 {
2088         struct drm_i915_private *i915 = to_i915(dev);
2089         struct drm_i915_gem_context_create_ext *args = data;
2090         struct create_ext ext_data;
2091         int ret;
2092
2093         if (!DRIVER_CAPS(i915)->has_logical_contexts)
2094                 return -ENODEV;
2095
2096         if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2097                 return -EINVAL;
2098
2099         ret = intel_gt_terminally_wedged(&i915->gt);
2100         if (ret)
2101                 return ret;
2102
2103         ext_data.fpriv = file->driver_priv;
2104         if (client_is_banned(ext_data.fpriv)) {
2105                 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2106                           current->comm,
2107                           pid_nr(get_task_pid(current, PIDTYPE_PID)));
2108                 return -EIO;
2109         }
2110
2111         ret = i915_mutex_lock_interruptible(dev);
2112         if (ret)
2113                 return ret;
2114
2115         ext_data.ctx = i915_gem_create_context(i915, args->flags);
2116         mutex_unlock(&dev->struct_mutex);
2117         if (IS_ERR(ext_data.ctx))
2118                 return PTR_ERR(ext_data.ctx);
2119
2120         if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2121                 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2122                                            create_extensions,
2123                                            ARRAY_SIZE(create_extensions),
2124                                            &ext_data);
2125                 if (ret)
2126                         goto err_ctx;
2127         }
2128
2129         ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
2130         if (ret < 0)
2131                 goto err_ctx;
2132
2133         args->ctx_id = ret;
2134         DRM_DEBUG("HW context %d created\n", args->ctx_id);
2135
2136         return 0;
2137
2138 err_ctx:
2139         context_close(ext_data.ctx);
2140         return ret;
2141 }
2142
2143 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2144                                    struct drm_file *file)
2145 {
2146         struct drm_i915_gem_context_destroy *args = data;
2147         struct drm_i915_file_private *file_priv = file->driver_priv;
2148         struct i915_gem_context *ctx;
2149
2150         if (args->pad != 0)
2151                 return -EINVAL;
2152
2153         if (!args->ctx_id)
2154                 return -ENOENT;
2155
2156         if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2157                 return -EINTR;
2158
2159         ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2160         mutex_unlock(&file_priv->context_idr_lock);
2161         if (!ctx)
2162                 return -ENOENT;
2163
2164         context_close(ctx);
2165         return 0;
2166 }
2167
2168 static int get_sseu(struct i915_gem_context *ctx,
2169                     struct drm_i915_gem_context_param *args)
2170 {
2171         struct drm_i915_gem_context_param_sseu user_sseu;
2172         struct intel_context *ce;
2173         unsigned long lookup;
2174         int err;
2175
2176         if (args->size == 0)
2177                 goto out;
2178         else if (args->size < sizeof(user_sseu))
2179                 return -EINVAL;
2180
2181         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2182                            sizeof(user_sseu)))
2183                 return -EFAULT;
2184
2185         if (user_sseu.rsvd)
2186                 return -EINVAL;
2187
2188         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2189                 return -EINVAL;
2190
2191         lookup = 0;
2192         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2193                 lookup |= LOOKUP_USER_INDEX;
2194
2195         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2196         if (IS_ERR(ce))
2197                 return PTR_ERR(ce);
2198
2199         err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2200         if (err) {
2201                 intel_context_put(ce);
2202                 return err;
2203         }
2204
2205         user_sseu.slice_mask = ce->sseu.slice_mask;
2206         user_sseu.subslice_mask = ce->sseu.subslice_mask;
2207         user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2208         user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2209
2210         intel_context_unlock_pinned(ce);
2211         intel_context_put(ce);
2212
2213         if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2214                          sizeof(user_sseu)))
2215                 return -EFAULT;
2216
2217 out:
2218         args->size = sizeof(user_sseu);
2219
2220         return 0;
2221 }
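/*
 * The matching userspace query for get_sseu() passes a
 * drm_i915_gem_context_param_sseu selecting the engine (by class/instance
 * here; setting I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX selects by engine-map
 * index instead) and reads back the slice/subslice/EU configuration. A
 * hedged sketch, querying rcs0 (fd/ctx_id and the helper name are
 * assumptions):
 *
 *        #include <stdint.h>
 *        #include <string.h>
 *        #include <sys/ioctl.h>
 *        #include <drm/i915_drm.h>
 *
 *        static int query_render_sseu(int fd, uint32_t ctx_id,
 *                                     struct drm_i915_gem_context_param_sseu *sseu)
 *        {
 *                struct drm_i915_gem_context_param p = {
 *                        .ctx_id = ctx_id,
 *                        .param = I915_CONTEXT_PARAM_SSEU,
 *                        .size = sizeof(*sseu),
 *                        .value = (uintptr_t)sseu,
 *                };
 *
 *                memset(sseu, 0, sizeof(*sseu)); // flags/rsvd must be zero
 *                sseu->engine.engine_class = I915_ENGINE_CLASS_RENDER;
 *                sseu->engine.engine_instance = 0;
 *
 *                return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *        }
 */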
2222
2223 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2224                                     struct drm_file *file)
2225 {
2226         struct drm_i915_file_private *file_priv = file->driver_priv;
2227         struct drm_i915_gem_context_param *args = data;
2228         struct i915_gem_context *ctx;
2229         int ret = 0;
2230
2231         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2232         if (!ctx)
2233                 return -ENOENT;
2234
2235         switch (args->param) {
2236         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2237                 args->size = 0;
2238                 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2239                 break;
2240
2241         case I915_CONTEXT_PARAM_GTT_SIZE:
2242                 args->size = 0;
2243                 if (ctx->vm)
2244                         args->value = ctx->vm->total;
2245                 else if (to_i915(dev)->ggtt.alias)
2246                         args->value = to_i915(dev)->ggtt.alias->vm.total;
2247                 else
2248                         args->value = to_i915(dev)->ggtt.vm.total;
2249                 break;
2250
2251         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2252                 args->size = 0;
2253                 args->value = i915_gem_context_no_error_capture(ctx);
2254                 break;
2255
2256         case I915_CONTEXT_PARAM_BANNABLE:
2257                 args->size = 0;
2258                 args->value = i915_gem_context_is_bannable(ctx);
2259                 break;
2260
2261         case I915_CONTEXT_PARAM_RECOVERABLE:
2262                 args->size = 0;
2263                 args->value = i915_gem_context_is_recoverable(ctx);
2264                 break;
2265
2266         case I915_CONTEXT_PARAM_PRIORITY:
2267                 args->size = 0;
2268                 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2269                 break;
2270
2271         case I915_CONTEXT_PARAM_SSEU:
2272                 ret = get_sseu(ctx, args);
2273                 break;
2274
2275         case I915_CONTEXT_PARAM_VM:
2276                 ret = get_ppgtt(file_priv, ctx, args);
2277                 break;
2278
2279         case I915_CONTEXT_PARAM_ENGINES:
2280                 ret = get_engines(ctx, args);
2281                 break;
2282
2283         case I915_CONTEXT_PARAM_BAN_PERIOD:
2284         default:
2285                 ret = -EINVAL;
2286                 break;
2287         }
2288
2289         i915_gem_context_put(ctx);
2290         return ret;
2291 }
2292
2293 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2294                                     struct drm_file *file)
2295 {
2296         struct drm_i915_file_private *file_priv = file->driver_priv;
2297         struct drm_i915_gem_context_param *args = data;
2298         struct i915_gem_context *ctx;
2299         int ret;
2300
2301         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2302         if (!ctx)
2303                 return -ENOENT;
2304
2305         ret = ctx_setparam(file_priv, ctx, args);
2306
2307         i915_gem_context_put(ctx);
2308         return ret;
2309 }
2310
2311 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2312                                        void *data, struct drm_file *file)
2313 {
2314         struct drm_i915_private *dev_priv = to_i915(dev);
2315         struct drm_i915_reset_stats *args = data;
2316         struct i915_gem_context *ctx;
2317         int ret;
2318
2319         if (args->flags || args->pad)
2320                 return -EINVAL;
2321
2322         ret = -ENOENT;
2323         rcu_read_lock();
2324         ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2325         if (!ctx)
2326                 goto out;
2327
2328         /*
2329          * We opt for unserialised reads here. This may result in tearing
2330          * in the extremely unlikely event of a GPU hang on this context
2331          * while we are querying its stats. If we need that extra layer of
2332          * protection, we should wrap the hangstats with a seqlock.
2333          */
2334
2335         if (capable(CAP_SYS_ADMIN))
2336                 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2337         else
2338                 args->reset_count = 0;
2339
2340         args->batch_active = atomic_read(&ctx->guilty_count);
2341         args->batch_pending = atomic_read(&ctx->active_count);
2342
2343         ret = 0;
2344 out:
2345         rcu_read_unlock();
2346         return ret;
2347 }
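/*
 * The reset-stats query above backs DRM_IOCTL_I915_GET_RESET_STATS. A
 * minimal userspace sketch for asking whether a context has been found
 * guilty of a GPU hang (fd/ctx_id and the helper name are assumptions):
 *
 *        #include <stdint.h>
 *        #include <sys/ioctl.h>
 *        #include <drm/i915_drm.h>
 *
 *        static int ctx_was_guilty(int fd, uint32_t ctx_id)
 *        {
 *                struct drm_i915_reset_stats stats = {
 *                        .ctx_id = ctx_id,       // flags and pad stay zero
 *                };
 *
 *                if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
 *                        return -1;
 *
 *                // batch_active mirrors the context's guilty_count above,
 *                // batch_pending its active_count.
 *                return stats.batch_active > 0;
 *        }
 *
 * Note that reset_count is only reported to CAP_SYS_ADMIN; other callers
 * read back zero, as implemented above.
 */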
2348
2349 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2350 {
2351         struct drm_i915_private *i915 = ctx->i915;
2352         int err = 0;
2353
2354         mutex_lock(&i915->contexts.mutex);
2355
2356         GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2357
2358         if (list_empty(&ctx->hw_id_link)) {
2359                 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2360
2361                 err = assign_hw_id(i915, &ctx->hw_id);
2362                 if (err)
2363                         goto out_unlock;
2364
2365                 list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2366         }
2367
2368         GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2369         atomic_inc(&ctx->hw_id_pin_count);
2370
2371 out_unlock:
2372         mutex_unlock(&i915->contexts.mutex);
2373         return err;
2374 }
2375
2376 /* GEM context-engines iterator: for_each_gem_engine() */
2377 struct intel_context *
2378 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2379 {
2380         const struct i915_gem_engines *e = it->engines;
2381         struct intel_context *ctx;
2382
2383         do {
2384                 if (it->idx >= e->num_engines)
2385                         return NULL;
2386
2387                 ctx = e->engines[it->idx++];
2388         } while (!ctx);
2389
2390         return ctx;
2391 }
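/*
 * This iterator is normally consumed through the for_each_gem_engine()
 * helper together with the context's engine lock. A sketch of the usual
 * in-kernel pattern (assuming a caller-supplied i915_gem_context *ctx),
 * here simply counting the populated slots:
 *
 *        struct i915_gem_engines_iter it;
 *        struct intel_context *ce;
 *        unsigned int count = 0;
 *
 *        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 *                count++;        // NULL slots are skipped by iter_next()
 *        i915_gem_context_unlock_engines(ctx);
 */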
2392
2393 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2394 #include "selftests/mock_context.c"
2395 #include "selftests/i915_gem_context.c"
2396 #endif
2397
2398 static void i915_global_gem_context_shrink(void)
2399 {
2400         kmem_cache_shrink(global.slab_luts);
2401 }
2402
2403 static void i915_global_gem_context_exit(void)
2404 {
2405         kmem_cache_destroy(global.slab_luts);
2406 }
2407
2408 static struct i915_global_gem_context global = { {
2409         .shrink = i915_global_gem_context_shrink,
2410         .exit = i915_global_gem_context_exit,
2411 } };
2412
2413 int __init i915_global_gem_context_init(void)
2414 {
2415         global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2416         if (!global.slab_luts)
2417                 return -ENOMEM;
2418
2419         i915_global_register(&global.base);
2420         return 0;
2421 }