drivers/gpu/drm/i915/gem/i915_gem_context.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and exits
11  * RC6 (the GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU some offset to load as the
21  * current context, in order to trigger a save of the context we actually care about. In fact, the
22  * code could likely be constructed, albeit in a more complicated fashion, to
23  * never use the default context, though that limits the driver's ability to
24  * swap out, and/or destroy other contexts.
25  *
26  * All other contexts are created as a request by the GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to not re-emit state (and
28  * potentially query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware, and object
33  * tracking works. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: another client submits execbuf with context
47  * S3->S1: context object was retired
48  * S3->S2: client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context" is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  * possible to destroy a context even though it is still active.
64  *
65  */
66
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69
70 #include <drm/i915_drm.h>
71
72 #include "gt/intel_lrc_reg.h"
73 #include "gt/intel_engine_user.h"
74
75 #include "i915_gem_context.h"
76 #include "i915_globals.h"
77 #include "i915_trace.h"
78 #include "i915_user_extensions.h"
79
80 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
81
82 static struct i915_global_gem_context {
83         struct i915_global base;
84         struct kmem_cache *slab_luts;
85 } global;
86
87 struct i915_lut_handle *i915_lut_handle_alloc(void)
88 {
89         return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
90 }
91
92 void i915_lut_handle_free(struct i915_lut_handle *lut)
93 {
94         return kmem_cache_free(global.slab_luts, lut);
95 }
96
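/*
 * Unhook every handle->vma lookup entry owned by this context: walk the
 * ctx->handles_vma radix tree, detach the matching lut entry from each
 * object's lut_list, close the vma once its open count drops to zero
 * (for non-GGTT vma) and drop the object references held via the LUT.
 */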
97 static void lut_close(struct i915_gem_context *ctx)
98 {
99         struct radix_tree_iter iter;
100         void __rcu **slot;
101
102         lockdep_assert_held(&ctx->mutex);
103
104         rcu_read_lock();
105         radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
106                 struct i915_vma *vma = rcu_dereference_raw(*slot);
107                 struct drm_i915_gem_object *obj = vma->obj;
108                 struct i915_lut_handle *lut;
109
110                 if (!kref_get_unless_zero(&obj->base.refcount))
111                         continue;
112
113                 rcu_read_unlock();
114                 i915_gem_object_lock(obj);
115                 list_for_each_entry(lut, &obj->lut_list, obj_link) {
116                         if (lut->ctx != ctx)
117                                 continue;
118
119                         if (lut->handle != iter.index)
120                                 continue;
121
122                         list_del(&lut->obj_link);
123                         break;
124                 }
125                 i915_gem_object_unlock(obj);
126                 rcu_read_lock();
127
128                 if (&lut->obj_link != &obj->lut_list) {
129                         i915_lut_handle_free(lut);
130                         radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
131                         if (atomic_dec_and_test(&vma->open_count) &&
132                             !i915_vma_is_ggtt(vma))
133                                 i915_vma_close(vma);
134                         i915_gem_object_put(obj);
135                 }
136
137                 i915_gem_object_put(obj);
138         }
139         rcu_read_unlock();
140 }
141
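/*
 * Resolve a user-supplied engine descriptor to an intel_context. With the
 * default engine map, ci is a { class, instance } pair matched against the
 * available engines; if the user installed their own map, LOOKUP_USER_INDEX
 * must be set and ci->engine_instance is an index into ctx->engines[].
 * A mismatch between the flag and the context's mode returns -EINVAL.
 */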
142 static struct intel_context *
143 lookup_user_engine(struct i915_gem_context *ctx,
144                    unsigned long flags,
145                    const struct i915_engine_class_instance *ci)
146 #define LOOKUP_USER_INDEX BIT(0)
147 {
148         int idx;
149
150         if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
151                 return ERR_PTR(-EINVAL);
152
153         if (!i915_gem_context_user_engines(ctx)) {
154                 struct intel_engine_cs *engine;
155
156                 engine = intel_engine_lookup_user(ctx->i915,
157                                                   ci->engine_class,
158                                                   ci->engine_instance);
159                 if (!engine)
160                         return ERR_PTR(-EINVAL);
161
162                 idx = engine->id;
163         } else {
164                 idx = ci->engine_instance;
165         }
166
167         return i915_gem_context_get_engine(ctx, idx);
168 }
169
170 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
171 {
172         unsigned int max;
173
174         lockdep_assert_held(&i915->contexts.mutex);
175
176         if (INTEL_GEN(i915) >= 11)
177                 max = GEN11_MAX_CONTEXT_HW_ID;
178         else if (USES_GUC_SUBMISSION(i915))
179                 /*
180                  * When using GuC in proxy submission, GuC consumes the
181                  * highest bit in the context id to indicate proxy submission.
182                  */
183                 max = MAX_GUC_CONTEXT_HW_ID;
184         else
185                 max = MAX_CONTEXT_HW_ID;
186
187         return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
188 }
189
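/*
 * Reclaim a hw_id from an idle context: contexts whose id is currently
 * pinned are rotated to the tail of the scan list, and the first unpinned
 * context found gives up its id. Returns -ENOSPC if everything is pinned.
 */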
190 static int steal_hw_id(struct drm_i915_private *i915)
191 {
192         struct i915_gem_context *ctx, *cn;
193         LIST_HEAD(pinned);
194         int id = -ENOSPC;
195
196         lockdep_assert_held(&i915->contexts.mutex);
197
198         list_for_each_entry_safe(ctx, cn,
199                                  &i915->contexts.hw_id_list, hw_id_link) {
200                 if (atomic_read(&ctx->hw_id_pin_count)) {
201                         list_move_tail(&ctx->hw_id_link, &pinned);
202                         continue;
203                 }
204
205                 GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
206                 list_del_init(&ctx->hw_id_link);
207                 id = ctx->hw_id;
208                 break;
209         }
210
211         /*
212          * Remember how far we got on the last repossession scan, so the
213          * list is kept in a "least recently scanned" order.
214          */
215         list_splice_tail(&pinned, &i915->contexts.hw_id_list);
216         return id;
217 }
218
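/*
 * Allocate a hw_id for this context: try the ida first (without waking the
 * oomkiller), fall back to stealing an id from an idle context, and only
 * then retry the plain allocation so that we report an accurate errno.
 */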
219 static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
220 {
221         int ret;
222
223         lockdep_assert_held(&i915->contexts.mutex);
224
225         /*
226          * We prefer to steal/stall ourselves and our users over that of the
227          * entire system. That may be a little unfair to our users, and
228          * even hurt high priority clients. The choice is whether to oomkill
229          * something else, or steal a context id.
230          */
231         ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
232         if (unlikely(ret < 0)) {
233                 ret = steal_hw_id(i915);
234                 if (ret < 0) /* once again for the correct errno code */
235                         ret = new_hw_id(i915, GFP_KERNEL);
236                 if (ret < 0)
237                         return ret;
238         }
239
240         *out = ret;
241         return 0;
242 }
243
244 static void release_hw_id(struct i915_gem_context *ctx)
245 {
246         struct drm_i915_private *i915 = ctx->i915;
247
248         if (list_empty(&ctx->hw_id_link))
249                 return;
250
251         mutex_lock(&i915->contexts.mutex);
252         if (!list_empty(&ctx->hw_id_link)) {
253                 ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
254                 list_del_init(&ctx->hw_id_link);
255         }
256         mutex_unlock(&i915->contexts.mutex);
257 }
258
259 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
260 {
261         while (count--) {
262                 if (!e->engines[count])
263                         continue;
264
265                 intel_context_put(e->engines[count]);
266         }
267         kfree(e);
268 }
269
270 static void free_engines(struct i915_gem_engines *e)
271 {
272         __free_engines(e, e->num_engines);
273 }
274
275 static void free_engines_rcu(struct rcu_head *rcu)
276 {
277         free_engines(container_of(rcu, struct i915_gem_engines, rcu));
278 }
279
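/*
 * Build the default ctx->engines[] map: one intel_context per physical
 * engine, indexed by engine->id. This map stays in place until the user
 * replaces it via set_engines().
 */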
280 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
281 {
282         struct intel_engine_cs *engine;
283         struct i915_gem_engines *e;
284         enum intel_engine_id id;
285
286         e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
287         if (!e)
288                 return ERR_PTR(-ENOMEM);
289
290         init_rcu_head(&e->rcu);
291         for_each_engine(engine, ctx->i915, id) {
292                 struct intel_context *ce;
293
294                 ce = intel_context_create(ctx, engine);
295                 if (IS_ERR(ce)) {
296                         __free_engines(e, id);
297                         return ERR_CAST(ce);
298                 }
299
300                 e->engines[id] = ce;
301         }
302         e->num_engines = id;
303
304         return e;
305 }
306
307 static void i915_gem_context_free(struct i915_gem_context *ctx)
308 {
309         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
310         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
311
312         release_hw_id(ctx);
313         if (ctx->vm)
314                 i915_vm_put(ctx->vm);
315
316         free_engines(rcu_access_pointer(ctx->engines));
317         mutex_destroy(&ctx->engines_mutex);
318
319         if (ctx->timeline)
320                 intel_timeline_put(ctx->timeline);
321
322         kfree(ctx->name);
323         put_pid(ctx->pid);
324
325         list_del(&ctx->link);
326         mutex_destroy(&ctx->mutex);
327
328         kfree_rcu(ctx, rcu);
329 }
330
331 static void contexts_free(struct drm_i915_private *i915)
332 {
333         struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
334         struct i915_gem_context *ctx, *cn;
335
336         lockdep_assert_held(&i915->drm.struct_mutex);
337
338         llist_for_each_entry_safe(ctx, cn, freed, free_link)
339                 i915_gem_context_free(ctx);
340 }
341
342 static void contexts_free_first(struct drm_i915_private *i915)
343 {
344         struct i915_gem_context *ctx;
345         struct llist_node *freed;
346
347         lockdep_assert_held(&i915->drm.struct_mutex);
348
349         freed = llist_del_first(&i915->contexts.free_list);
350         if (!freed)
351                 return;
352
353         ctx = container_of(freed, typeof(*ctx), free_link);
354         i915_gem_context_free(ctx);
355 }
356
357 static void contexts_free_worker(struct work_struct *work)
358 {
359         struct drm_i915_private *i915 =
360                 container_of(work, typeof(*i915), contexts.free_work);
361
362         mutex_lock(&i915->drm.struct_mutex);
363         contexts_free(i915);
364         mutex_unlock(&i915->drm.struct_mutex);
365 }
366
367 void i915_gem_context_release(struct kref *ref)
368 {
369         struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
370         struct drm_i915_private *i915 = ctx->i915;
371
372         trace_i915_context_free(ctx);
373         if (llist_add(&ctx->free_link, &i915->contexts.free_list))
374                 queue_work(i915->wq, &i915->contexts.free_work);
375 }
376
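/*
 * Close the context on behalf of its owner: mark it closed, poison its
 * file_priv, return the hw_id for reuse, tear down the handle->vma LUT
 * and drop the reference that kept the context alive.
 */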
377 static void context_close(struct i915_gem_context *ctx)
378 {
379         mutex_lock(&ctx->mutex);
380
381         i915_gem_context_set_closed(ctx);
382         ctx->file_priv = ERR_PTR(-EBADF);
383
384         /*
385          * This context will never again be assigned to HW, so we can
386          * reuse its ID for the next context.
387          */
388         release_hw_id(ctx);
389
390         /*
391          * The LUT uses the VMA as a backpointer to unref the object,
392          * so we need to clear the LUT before we close all the VMAs (inside
393          * the ppgtt).
394          */
395         lut_close(ctx);
396
397         mutex_unlock(&ctx->mutex);
398         i915_gem_context_put(ctx);
399 }
400
401 static struct i915_gem_context *
402 __create_context(struct drm_i915_private *i915)
403 {
404         struct i915_gem_context *ctx;
405         struct i915_gem_engines *e;
406         int err;
407         int i;
408
409         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
410         if (!ctx)
411                 return ERR_PTR(-ENOMEM);
412
413         kref_init(&ctx->ref);
414         list_add_tail(&ctx->link, &i915->contexts.list);
415         ctx->i915 = i915;
416         ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
417         mutex_init(&ctx->mutex);
418
419         mutex_init(&ctx->engines_mutex);
420         e = default_engines(ctx);
421         if (IS_ERR(e)) {
422                 err = PTR_ERR(e);
423                 goto err_free;
424         }
425         RCU_INIT_POINTER(ctx->engines, e);
426
427         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
428         INIT_LIST_HEAD(&ctx->hw_id_link);
429
430         /* NB: Mark all slices as needing a remap so that when the context first
431          * loads it will restore whatever remap state already exists. If there
432          * is no remap info, it will be a NOP. */
433         ctx->remap_slice = ALL_L3_SLICES(i915);
434
435         i915_gem_context_set_bannable(ctx);
436         i915_gem_context_set_recoverable(ctx);
437
438         ctx->ring_size = 4 * PAGE_SIZE;
439
440         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
441                 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
442
443         return ctx;
444
445 err_free:
446         kfree(ctx);
447         return ERR_PTR(err);
448 }
449
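/*
 * Swap the context (and each of its engine contexts) over to the new
 * address space. The old vm is returned with its reference transferred to
 * the caller, who is responsible for putting it.
 */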
450 static struct i915_address_space *
451 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
452 {
453         struct i915_address_space *old = ctx->vm;
454         struct i915_gem_engines_iter it;
455         struct intel_context *ce;
456
457         GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
458
459         ctx->vm = i915_vm_get(vm);
460
461         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
462                 i915_vm_put(ce->vm);
463                 ce->vm = i915_vm_get(vm);
464         }
465         i915_gem_context_unlock_engines(ctx);
466
467         return old;
468 }
469
470 static void __assign_ppgtt(struct i915_gem_context *ctx,
471                            struct i915_address_space *vm)
472 {
473         if (vm == ctx->vm)
474                 return;
475
476         vm = __set_ppgtt(ctx, vm);
477         if (vm)
478                 i915_vm_put(vm);
479 }
480
481 static struct i915_gem_context *
482 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
483 {
484         struct i915_gem_context *ctx;
485
486         lockdep_assert_held(&dev_priv->drm.struct_mutex);
487
488         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
489             !HAS_EXECLISTS(dev_priv))
490                 return ERR_PTR(-EINVAL);
491
492         /* Reap the most stale context */
493         contexts_free_first(dev_priv);
494
495         ctx = __create_context(dev_priv);
496         if (IS_ERR(ctx))
497                 return ctx;
498
499         if (HAS_FULL_PPGTT(dev_priv)) {
500                 struct i915_ppgtt *ppgtt;
501
502                 ppgtt = i915_ppgtt_create(dev_priv);
503                 if (IS_ERR(ppgtt)) {
504                         DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
505                                          PTR_ERR(ppgtt));
506                         context_close(ctx);
507                         return ERR_CAST(ppgtt);
508                 }
509
510                 __assign_ppgtt(ctx, &ppgtt->vm);
511                 i915_vm_put(&ppgtt->vm);
512         }
513
514         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
515                 struct intel_timeline *timeline;
516
517                 timeline = intel_timeline_create(&dev_priv->gt, NULL);
518                 if (IS_ERR(timeline)) {
519                         context_close(ctx);
520                         return ERR_CAST(timeline);
521                 }
522
523                 ctx->timeline = timeline;
524         }
525
526         trace_i915_context_create(ctx);
527
528         return ctx;
529 }
530
531 /**
532  * i915_gem_context_create_gvt - create a GVT GEM context
533  * @dev: drm device *
534  *
535  * This function is used to create a GVT specific GEM context.
536  *
537  * Returns:
538  * pointer to i915_gem_context on success, error pointer if failed
539  *
540  */
541 struct i915_gem_context *
542 i915_gem_context_create_gvt(struct drm_device *dev)
543 {
544         struct i915_gem_context *ctx;
545         int ret;
546
547         if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
548                 return ERR_PTR(-ENODEV);
549
550         ret = i915_mutex_lock_interruptible(dev);
551         if (ret)
552                 return ERR_PTR(ret);
553
554         ctx = i915_gem_create_context(to_i915(dev), 0);
555         if (IS_ERR(ctx))
556                 goto out;
557
558         ret = i915_gem_context_pin_hw_id(ctx);
559         if (ret) {
560                 context_close(ctx);
561                 ctx = ERR_PTR(ret);
562                 goto out;
563         }
564
565         ctx->file_priv = ERR_PTR(-EBADF);
566         i915_gem_context_set_closed(ctx); /* not user accessible */
567         i915_gem_context_clear_bannable(ctx);
568         i915_gem_context_set_force_single_submission(ctx);
569         if (!USES_GUC_SUBMISSION(to_i915(dev)))
570                 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
571
572         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
573 out:
574         mutex_unlock(&dev->struct_mutex);
575         return ctx;
576 }
577
578 static void
579 destroy_kernel_context(struct i915_gem_context **ctxp)
580 {
581         struct i915_gem_context *ctx;
582
583         /* Keep the context ref so that we can free it immediately ourselves */
584         ctx = i915_gem_context_get(fetch_and_zero(ctxp));
585         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
586
587         context_close(ctx);
588         i915_gem_context_free(ctx);
589 }
590
591 struct i915_gem_context *
592 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
593 {
594         struct i915_gem_context *ctx;
595         int err;
596
597         ctx = i915_gem_create_context(i915, 0);
598         if (IS_ERR(ctx))
599                 return ctx;
600
601         err = i915_gem_context_pin_hw_id(ctx);
602         if (err) {
603                 destroy_kernel_context(&ctx);
604                 return ERR_PTR(err);
605         }
606
607         i915_gem_context_clear_bannable(ctx);
608         ctx->sched.priority = I915_USER_PRIORITY(prio);
609         ctx->ring_size = PAGE_SIZE;
610
611         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
612
613         return ctx;
614 }
615
616 static void init_contexts(struct drm_i915_private *i915)
617 {
618         mutex_init(&i915->contexts.mutex);
619         INIT_LIST_HEAD(&i915->contexts.list);
620
621         /* Using the simple ida interface, the max is limited by sizeof(int) */
622         BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
623         BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
624         ida_init(&i915->contexts.hw_ida);
625         INIT_LIST_HEAD(&i915->contexts.hw_id_list);
626
627         INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
628         init_llist_head(&i915->contexts.free_list);
629 }
630
631 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
632 {
633         struct i915_gem_context *ctx;
634
635         /* Reassure ourselves we are only called once */
636         GEM_BUG_ON(dev_priv->kernel_context);
637
638         init_contexts(dev_priv);
639
640         /* lowest priority; idle task */
641         ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
642         if (IS_ERR(ctx)) {
643                 DRM_ERROR("Failed to create default global context\n");
644                 return PTR_ERR(ctx);
645         }
646         /*
647          * For easy recognisability, we want the kernel context to be 0 and then
648          * all user contexts will have non-zero hw_id. Kernel contexts are
649          * permanently pinned, so that we never suffer a stall and can
650          * use them from any allocation context (e.g. for evicting other
651          * contexts and from inside the shrinker).
652          */
653         GEM_BUG_ON(ctx->hw_id);
654         GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
655         dev_priv->kernel_context = ctx;
656
657         DRM_DEBUG_DRIVER("%s context support initialized\n",
658                          DRIVER_CAPS(dev_priv)->has_logical_contexts ?
659                          "logical" : "fake");
660         return 0;
661 }
662
663 void i915_gem_contexts_fini(struct drm_i915_private *i915)
664 {
665         lockdep_assert_held(&i915->drm.struct_mutex);
666
667         destroy_kernel_context(&i915->kernel_context);
668
669         /* Must free all deferred contexts (via flush_workqueue) first */
670         GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
671         ida_destroy(&i915->contexts.hw_ida);
672 }
673
674 static int context_idr_cleanup(int id, void *p, void *data)
675 {
676         context_close(p);
677         return 0;
678 }
679
680 static int vm_idr_cleanup(int id, void *p, void *data)
681 {
682         i915_vm_put(p);
683         return 0;
684 }
685
686 static int gem_context_register(struct i915_gem_context *ctx,
687                                 struct drm_i915_file_private *fpriv)
688 {
689         int ret;
690
691         ctx->file_priv = fpriv;
692         if (ctx->vm)
693                 ctx->vm->file = fpriv;
694
695         ctx->pid = get_task_pid(current, PIDTYPE_PID);
696         ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
697                               current->comm, pid_nr(ctx->pid));
698         if (!ctx->name) {
699                 ret = -ENOMEM;
700                 goto err_pid;
701         }
702
703         /* And finally expose ourselves to userspace via the idr */
704         mutex_lock(&fpriv->context_idr_lock);
705         ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
706         mutex_unlock(&fpriv->context_idr_lock);
707         if (ret >= 0)
708                 goto out;
709
710         kfree(fetch_and_zero(&ctx->name));
711 err_pid:
712         put_pid(fetch_and_zero(&ctx->pid));
713 out:
714         return ret;
715 }
716
717 int i915_gem_context_open(struct drm_i915_private *i915,
718                           struct drm_file *file)
719 {
720         struct drm_i915_file_private *file_priv = file->driver_priv;
721         struct i915_gem_context *ctx;
722         int err;
723
724         mutex_init(&file_priv->context_idr_lock);
725         mutex_init(&file_priv->vm_idr_lock);
726
727         idr_init(&file_priv->context_idr);
728         idr_init_base(&file_priv->vm_idr, 1);
729
730         mutex_lock(&i915->drm.struct_mutex);
731         ctx = i915_gem_create_context(i915, 0);
732         mutex_unlock(&i915->drm.struct_mutex);
733         if (IS_ERR(ctx)) {
734                 err = PTR_ERR(ctx);
735                 goto err;
736         }
737
738         err = gem_context_register(ctx, file_priv);
739         if (err < 0)
740                 goto err_ctx;
741
742         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
743         GEM_BUG_ON(err > 0);
744
745         return 0;
746
747 err_ctx:
748         context_close(ctx);
749 err:
750         idr_destroy(&file_priv->vm_idr);
751         idr_destroy(&file_priv->context_idr);
752         mutex_destroy(&file_priv->vm_idr_lock);
753         mutex_destroy(&file_priv->context_idr_lock);
754         return err;
755 }
756
757 void i915_gem_context_close(struct drm_file *file)
758 {
759         struct drm_i915_file_private *file_priv = file->driver_priv;
760
761         idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
762         idr_destroy(&file_priv->context_idr);
763         mutex_destroy(&file_priv->context_idr_lock);
764
765         idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
766         idr_destroy(&file_priv->vm_idr);
767         mutex_destroy(&file_priv->vm_idr_lock);
768 }
769
770 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
771                              struct drm_file *file)
772 {
773         struct drm_i915_private *i915 = to_i915(dev);
774         struct drm_i915_gem_vm_control *args = data;
775         struct drm_i915_file_private *file_priv = file->driver_priv;
776         struct i915_ppgtt *ppgtt;
777         int err;
778
779         if (!HAS_FULL_PPGTT(i915))
780                 return -ENODEV;
781
782         if (args->flags)
783                 return -EINVAL;
784
785         ppgtt = i915_ppgtt_create(i915);
786         if (IS_ERR(ppgtt))
787                 return PTR_ERR(ppgtt);
788
789         ppgtt->vm.file = file_priv;
790
791         if (args->extensions) {
792                 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
793                                            NULL, 0,
794                                            ppgtt);
795                 if (err)
796                         goto err_put;
797         }
798
799         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
800         if (err)
801                 goto err_put;
802
803         err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
804         if (err < 0)
805                 goto err_unlock;
806
807         GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
808
809         mutex_unlock(&file_priv->vm_idr_lock);
810
811         args->vm_id = err;
812         return 0;
813
814 err_unlock:
815         mutex_unlock(&file_priv->vm_idr_lock);
816 err_put:
817         i915_vm_put(&ppgtt->vm);
818         return err;
819 }
820
821 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
822                               struct drm_file *file)
823 {
824         struct drm_i915_file_private *file_priv = file->driver_priv;
825         struct drm_i915_gem_vm_control *args = data;
826         struct i915_address_space *vm;
827         int err;
828         u32 id;
829
830         if (args->flags)
831                 return -EINVAL;
832
833         if (args->extensions)
834                 return -EINVAL;
835
836         id = args->vm_id;
837         if (!id)
838                 return -ENOENT;
839
840         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
841         if (err)
842                 return err;
843
844         vm = idr_remove(&file_priv->vm_idr, id);
845
846         mutex_unlock(&file_priv->vm_idr_lock);
847         if (!vm)
848                 return -ENOENT;
849
850         i915_vm_put(vm);
851         return 0;
852 }
853
854 struct context_barrier_task {
855         struct i915_active base;
856         void (*task)(void *data);
857         void *data;
858 };
859
860 static void cb_retire(struct i915_active *base)
861 {
862         struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
863
864         if (cb->task)
865                 cb->task(cb->data);
866
867         i915_active_fini(&cb->base);
868         kfree(cb);
869 }
870
871 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
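/*
 * Emit a barrier across the context's engines: queue a request on every
 * matching engine (the skip/emit callbacks may filter engines or add extra
 * commands) and track them with an i915_active so that task() runs once
 * all of the barrier requests have retired. If an error occurs the task is
 * suppressed and the caller must unwind instead.
 */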
872 static int context_barrier_task(struct i915_gem_context *ctx,
873                                 intel_engine_mask_t engines,
874                                 bool (*skip)(struct intel_context *ce, void *data),
875                                 int (*emit)(struct i915_request *rq, void *data),
876                                 void (*task)(void *data),
877                                 void *data)
878 {
879         struct drm_i915_private *i915 = ctx->i915;
880         struct context_barrier_task *cb;
881         struct i915_gem_engines_iter it;
882         struct intel_context *ce;
883         int err = 0;
884
885         lockdep_assert_held(&i915->drm.struct_mutex);
886         GEM_BUG_ON(!task);
887
888         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
889         if (!cb)
890                 return -ENOMEM;
891
892         i915_active_init(i915, &cb->base, NULL, cb_retire);
893         err = i915_active_acquire(&cb->base);
894         if (err) {
895                 kfree(cb);
896                 return err;
897         }
898
899         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
900                 struct i915_request *rq;
901
902                 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
903                                        ce->engine->mask)) {
904                         err = -ENXIO;
905                         break;
906                 }
907
908                 if (!(ce->engine->mask & engines))
909                         continue;
910
911                 if (skip && skip(ce, data))
912                         continue;
913
914                 rq = intel_context_create_request(ce);
915                 if (IS_ERR(rq)) {
916                         err = PTR_ERR(rq);
917                         break;
918                 }
919
920                 err = 0;
921                 if (emit)
922                         err = emit(rq, data);
923                 if (err == 0)
924                         err = i915_active_ref(&cb->base, rq->fence.context, rq);
925
926                 i915_request_add(rq);
927                 if (err)
928                         break;
929         }
930         i915_gem_context_unlock_engines(ctx);
931
932         cb->task = err ? NULL : task; /* caller needs to unwind instead */
933         cb->data = data;
934
935         i915_active_release(&cb->base);
936
937         return err;
938 }
939
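/*
 * Export the context's address space to userspace: allocate an id for it
 * in the file's vm_idr (taking an extra reference held by the idr) and
 * report that id back in args->value.
 */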
940 static int get_ppgtt(struct drm_i915_file_private *file_priv,
941                      struct i915_gem_context *ctx,
942                      struct drm_i915_gem_context_param *args)
943 {
944         struct i915_address_space *vm;
945         int ret;
946
947         if (!ctx->vm)
948                 return -ENODEV;
949
950         /* XXX rcu acquire? */
951         ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
952         if (ret)
953                 return ret;
954
955         vm = i915_vm_get(ctx->vm);
956         mutex_unlock(&ctx->i915->drm.struct_mutex);
957
958         ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
959         if (ret)
960                 goto err_put;
961
962         ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
963         GEM_BUG_ON(!ret);
964         if (ret < 0)
965                 goto err_unlock;
966
967         i915_vm_get(vm);
968
969         args->size = 0;
970         args->value = ret;
971
972         ret = 0;
973 err_unlock:
974         mutex_unlock(&file_priv->vm_idr_lock);
975 err_put:
976         i915_vm_put(vm);
977         return ret;
978 }
979
980 static void set_ppgtt_barrier(void *data)
981 {
982         struct i915_address_space *old = data;
983
984         if (INTEL_GEN(old->i915) < 8)
985                 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
986
987         i915_vm_put(old);
988 }
989
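/*
 * Emit the commands that switch an engine over to the new page tables: for
 * a 4-level vm, load the top-level page directory address into PDP0; on
 * 3-level execlists platforms, reload all four PDP entries; otherwise the
 * ppGTT is not part of the legacy context image, so just pin the gen6 ppgtt.
 */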
990 static int emit_ppgtt_update(struct i915_request *rq, void *data)
991 {
992         struct i915_address_space *vm = rq->hw_context->vm;
993         struct intel_engine_cs *engine = rq->engine;
994         u32 base = engine->mmio_base;
995         u32 *cs;
996         int i;
997
998         if (i915_vm_is_4lvl(vm)) {
999                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1000                 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1001
1002                 cs = intel_ring_begin(rq, 6);
1003                 if (IS_ERR(cs))
1004                         return PTR_ERR(cs);
1005
1006                 *cs++ = MI_LOAD_REGISTER_IMM(2);
1007
1008                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1009                 *cs++ = upper_32_bits(pd_daddr);
1010                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1011                 *cs++ = lower_32_bits(pd_daddr);
1012
1013                 *cs++ = MI_NOOP;
1014                 intel_ring_advance(rq, cs);
1015         } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1016                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1017
1018                 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1019                 if (IS_ERR(cs))
1020                         return PTR_ERR(cs);
1021
1022                 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1023                 for (i = GEN8_3LVL_PDPES; i--; ) {
1024                         const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1025
1026                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1027                         *cs++ = upper_32_bits(pd_daddr);
1028                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1029                         *cs++ = lower_32_bits(pd_daddr);
1030                 }
1031                 *cs++ = MI_NOOP;
1032                 intel_ring_advance(rq, cs);
1033         } else {
1034                 /* ppGTT is not part of the legacy context image */
1035                 gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
1036         }
1037
1038         return 0;
1039 }
1040
1041 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1042 {
1043         if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1044                 return !ce->state;
1045         else
1046                 return !atomic_read(&ce->pin_count);
1047 }
1048
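/*
 * Switch the context to a different address space: look up the vm by id,
 * tear down the handle->vma LUT (it is tied to the old vm), install the new
 * vm on every engine context, then use a context barrier to emit the page
 * directory reload on each engine and release the old vm only once those
 * requests have retired. On failure the old vm is restored.
 */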
1049 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1050                      struct i915_gem_context *ctx,
1051                      struct drm_i915_gem_context_param *args)
1052 {
1053         struct i915_address_space *vm, *old;
1054         int err;
1055
1056         if (args->size)
1057                 return -EINVAL;
1058
1059         if (!ctx->vm)
1060                 return -ENODEV;
1061
1062         if (upper_32_bits(args->value))
1063                 return -ENOENT;
1064
1065         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1066         if (err)
1067                 return err;
1068
1069         vm = idr_find(&file_priv->vm_idr, args->value);
1070         if (vm)
1071                 i915_vm_get(vm);
1072         mutex_unlock(&file_priv->vm_idr_lock);
1073         if (!vm)
1074                 return -ENOENT;
1075
1076         err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1077         if (err)
1078                 goto out;
1079
1080         if (vm == ctx->vm)
1081                 goto unlock;
1082
1083         /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1084         mutex_lock(&ctx->mutex);
1085         lut_close(ctx);
1086         mutex_unlock(&ctx->mutex);
1087
1088         old = __set_ppgtt(ctx, vm);
1089
1090         /*
1091          * We need to flush any requests using the current ppgtt before
1092          * we release it as the requests do not hold a reference themselves,
1093          * only indirectly through the context.
1094          */
1095         err = context_barrier_task(ctx, ALL_ENGINES,
1096                                    skip_ppgtt_update,
1097                                    emit_ppgtt_update,
1098                                    set_ppgtt_barrier,
1099                                    old);
1100         if (err) {
1101                 i915_vm_put(__set_ppgtt(ctx, old));
1102                 i915_vm_put(old);
1103         }
1104
1105 unlock:
1106         mutex_unlock(&ctx->i915->drm.struct_mutex);
1107
1108 out:
1109         i915_vm_put(vm);
1110         return err;
1111 }
1112
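/*
 * Write the new RPCS (slice/subslice/EU) configuration directly into the
 * target context image: a single MI_STORE_DWORD_IMM via the GGTT into the
 * CTX_R_PWR_CLK_STATE slot of ce->state.
 */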
1113 static int gen8_emit_rpcs_config(struct i915_request *rq,
1114                                  struct intel_context *ce,
1115                                  struct intel_sseu sseu)
1116 {
1117         u64 offset;
1118         u32 *cs;
1119
1120         cs = intel_ring_begin(rq, 4);
1121         if (IS_ERR(cs))
1122                 return PTR_ERR(cs);
1123
1124         offset = i915_ggtt_offset(ce->state) +
1125                  LRC_STATE_PN * PAGE_SIZE +
1126                  (CTX_R_PWR_CLK_STATE + 1) * 4;
1127
1128         *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1129         *cs++ = lower_32_bits(offset);
1130         *cs++ = upper_32_bits(offset);
1131         *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1132
1133         intel_ring_advance(rq, cs);
1134
1135         return 0;
1136 }
1137
1138 static int
1139 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1140 {
1141         struct i915_request *rq;
1142         int ret;
1143
1144         lockdep_assert_held(&ce->pin_mutex);
1145
1146         /*
1147          * If the context is not idle, we have to submit an ordered request to
1148          * modify its context image via the kernel context (writing to our own
1149          * image, or into the registers directly, does not stick). Pristine
1150          * and idle contexts will be configured on pinning.
1151          */
1152         if (!intel_context_is_pinned(ce))
1153                 return 0;
1154
1155         rq = i915_request_create(ce->engine->kernel_context);
1156         if (IS_ERR(rq))
1157                 return PTR_ERR(rq);
1158
1159         /* Serialise with the remote context */
1160         ret = intel_context_prepare_remote_request(ce, rq);
1161         if (ret == 0)
1162                 ret = gen8_emit_rpcs_config(rq, ce, sseu);
1163
1164         i915_request_add(rq);
1165         return ret;
1166 }
1167
1168 static int
1169 __intel_context_reconfigure_sseu(struct intel_context *ce,
1170                                  struct intel_sseu sseu)
1171 {
1172         int ret;
1173
1174         GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
1175
1176         ret = intel_context_lock_pinned(ce);
1177         if (ret)
1178                 return ret;
1179
1180         /* Nothing to do if unmodified. */
1181         if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1182                 goto unlock;
1183
1184         ret = gen8_modify_rpcs(ce, sseu);
1185         if (!ret)
1186                 ce->sseu = sseu;
1187
1188 unlock:
1189         intel_context_unlock_pinned(ce);
1190         return ret;
1191 }
1192
1193 static int
1194 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1195 {
1196         struct drm_i915_private *i915 = ce->engine->i915;
1197         int ret;
1198
1199         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1200         if (ret)
1201                 return ret;
1202
1203         ret = __intel_context_reconfigure_sseu(ce, sseu);
1204
1205         mutex_unlock(&i915->drm.struct_mutex);
1206
1207         return ret;
1208 }
1209
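/*
 * Validate the user's sseu request against the device topology and convert
 * it into the internal intel_sseu representation. Gen11 additionally
 * enforces the VME-oriented restrictions on slice/subslice combinations
 * and rejects any change to the EU configuration.
 */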
1210 static int
1211 user_to_context_sseu(struct drm_i915_private *i915,
1212                      const struct drm_i915_gem_context_param_sseu *user,
1213                      struct intel_sseu *context)
1214 {
1215         const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1216
1217         /* No zeros in any field. */
1218         if (!user->slice_mask || !user->subslice_mask ||
1219             !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1220                 return -EINVAL;
1221
1222         /* Max > min. */
1223         if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1224                 return -EINVAL;
1225
1226         /*
1227          * Some future proofing on the types since the uAPI is wider than the
1228          * current internal implementation.
1229          */
1230         if (overflows_type(user->slice_mask, context->slice_mask) ||
1231             overflows_type(user->subslice_mask, context->subslice_mask) ||
1232             overflows_type(user->min_eus_per_subslice,
1233                            context->min_eus_per_subslice) ||
1234             overflows_type(user->max_eus_per_subslice,
1235                            context->max_eus_per_subslice))
1236                 return -EINVAL;
1237
1238         /* Check validity against hardware. */
1239         if (user->slice_mask & ~device->slice_mask)
1240                 return -EINVAL;
1241
1242         if (user->subslice_mask & ~device->subslice_mask[0])
1243                 return -EINVAL;
1244
1245         if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1246                 return -EINVAL;
1247
1248         context->slice_mask = user->slice_mask;
1249         context->subslice_mask = user->subslice_mask;
1250         context->min_eus_per_subslice = user->min_eus_per_subslice;
1251         context->max_eus_per_subslice = user->max_eus_per_subslice;
1252
1253         /* Part specific restrictions. */
1254         if (IS_GEN(i915, 11)) {
1255                 unsigned int hw_s = hweight8(device->slice_mask);
1256                 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1257                 unsigned int req_s = hweight8(context->slice_mask);
1258                 unsigned int req_ss = hweight8(context->subslice_mask);
1259
1260                 /*
1261                  * Only full subslice enablement is possible if more than one
1262                  * slice is turned on.
1263                  */
1264                 if (req_s > 1 && req_ss != hw_ss_per_s)
1265                         return -EINVAL;
1266
1267                 /*
1268                  * If more than four (SScount bitfield limit) subslices are
1269                  * requested then the number has to be even.
1270                  */
1271                 if (req_ss > 4 && (req_ss & 1))
1272                         return -EINVAL;
1273
1274                 /*
1275                  * If only one slice is enabled and subslice count is below the
1276                  * device full enablement, it must be at most half of the all
1277                  * available subslices.
1278                  */
1279                 if (req_s == 1 && req_ss < hw_ss_per_s &&
1280                     req_ss > (hw_ss_per_s / 2))
1281                         return -EINVAL;
1282
1283                 /* ABI restriction - VME use case only. */
1284
1285                 /* All slices or one slice only. */
1286                 if (req_s != 1 && req_s != hw_s)
1287                         return -EINVAL;
1288
1289                 /*
1290                  * Half subslices or full enablement only when one slice is
1291                  * enabled.
1292                  */
1293                 if (req_s == 1 &&
1294                     (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1295                         return -EINVAL;
1296
1297                 /* No EU configuration changes. */
1298                 if ((user->min_eus_per_subslice !=
1299                      device->max_eus_per_subslice) ||
1300                     (user->max_eus_per_subslice !=
1301                      device->max_eus_per_subslice))
1302                         return -EINVAL;
1303         }
1304
1305         return 0;
1306 }
1307
1308 static int set_sseu(struct i915_gem_context *ctx,
1309                     struct drm_i915_gem_context_param *args)
1310 {
1311         struct drm_i915_private *i915 = ctx->i915;
1312         struct drm_i915_gem_context_param_sseu user_sseu;
1313         struct intel_context *ce;
1314         struct intel_sseu sseu;
1315         unsigned long lookup;
1316         int ret;
1317
1318         if (args->size < sizeof(user_sseu))
1319                 return -EINVAL;
1320
1321         if (!IS_GEN(i915, 11))
1322                 return -ENODEV;
1323
1324         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1325                            sizeof(user_sseu)))
1326                 return -EFAULT;
1327
1328         if (user_sseu.rsvd)
1329                 return -EINVAL;
1330
1331         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1332                 return -EINVAL;
1333
1334         lookup = 0;
1335         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1336                 lookup |= LOOKUP_USER_INDEX;
1337
1338         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1339         if (IS_ERR(ce))
1340                 return PTR_ERR(ce);
1341
1342         /* Only render engine supports RPCS configuration. */
1343         if (ce->engine->class != RENDER_CLASS) {
1344                 ret = -ENODEV;
1345                 goto out_ce;
1346         }
1347
1348         ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1349         if (ret)
1350                 goto out_ce;
1351
1352         ret = intel_context_reconfigure_sseu(ce, sseu);
1353         if (ret)
1354                 goto out_ce;
1355
1356         args->size = sizeof(user_sseu);
1357
1358 out_ce:
1359         intel_context_put(ce);
1360         return ret;
1361 }
1362
1363 struct set_engines {
1364         struct i915_gem_context *ctx;
1365         struct i915_gem_engines *engines;
1366 };
1367
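/*
 * I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE: create a virtual engine that
 * load-balances across the user's list of sibling engines and install it
 * at the requested index of the new engines[] map.
 */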
1368 static int
1369 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1370 {
1371         struct i915_context_engines_load_balance __user *ext =
1372                 container_of_user(base, typeof(*ext), base);
1373         const struct set_engines *set = data;
1374         struct intel_engine_cs *stack[16];
1375         struct intel_engine_cs **siblings;
1376         struct intel_context *ce;
1377         u16 num_siblings, idx;
1378         unsigned int n;
1379         int err;
1380
1381         if (!HAS_EXECLISTS(set->ctx->i915))
1382                 return -ENODEV;
1383
1384         if (USES_GUC_SUBMISSION(set->ctx->i915))
1385                 return -ENODEV; /* not implemented yet */
1386
1387         if (get_user(idx, &ext->engine_index))
1388                 return -EFAULT;
1389
1390         if (idx >= set->engines->num_engines) {
1391                 DRM_DEBUG("Invalid placement value, %d >= %d\n",
1392                           idx, set->engines->num_engines);
1393                 return -EINVAL;
1394         }
1395
1396         idx = array_index_nospec(idx, set->engines->num_engines);
1397         if (set->engines->engines[idx]) {
1398                 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1399                 return -EEXIST;
1400         }
1401
1402         if (get_user(num_siblings, &ext->num_siblings))
1403                 return -EFAULT;
1404
1405         err = check_user_mbz(&ext->flags);
1406         if (err)
1407                 return err;
1408
1409         err = check_user_mbz(&ext->mbz64);
1410         if (err)
1411                 return err;
1412
1413         siblings = stack;
1414         if (num_siblings > ARRAY_SIZE(stack)) {
1415                 siblings = kmalloc_array(num_siblings,
1416                                          sizeof(*siblings),
1417                                          GFP_KERNEL);
1418                 if (!siblings)
1419                         return -ENOMEM;
1420         }
1421
1422         for (n = 0; n < num_siblings; n++) {
1423                 struct i915_engine_class_instance ci;
1424
1425                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1426                         err = -EFAULT;
1427                         goto out_siblings;
1428                 }
1429
1430                 siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1431                                                        ci.engine_class,
1432                                                        ci.engine_instance);
1433                 if (!siblings[n]) {
1434                         DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1435                                   n, ci.engine_class, ci.engine_instance);
1436                         err = -EINVAL;
1437                         goto out_siblings;
1438                 }
1439         }
1440
1441         ce = intel_execlists_create_virtual(set->ctx, siblings, n);
1442         if (IS_ERR(ce)) {
1443                 err = PTR_ERR(ce);
1444                 goto out_siblings;
1445         }
1446
1447         if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1448                 intel_context_put(ce);
1449                 err = -EEXIST;
1450                 goto out_siblings;
1451         }
1452
1453 out_siblings:
1454         if (siblings != stack)
1455                 kfree(siblings);
1456
1457         return err;
1458 }
1459
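/*
 * I915_CONTEXT_ENGINES_EXT_BOND: record bonds on a virtual engine so that
 * when a request is coupled via a submit fence to work on the named master
 * engine, the virtual engine is restricted to the listed siblings. For a
 * non-virtual engine there is nothing to record.
 */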
1460 static int
1461 set_engines__bond(struct i915_user_extension __user *base, void *data)
1462 {
1463         struct i915_context_engines_bond __user *ext =
1464                 container_of_user(base, typeof(*ext), base);
1465         const struct set_engines *set = data;
1466         struct i915_engine_class_instance ci;
1467         struct intel_engine_cs *virtual;
1468         struct intel_engine_cs *master;
1469         u16 idx, num_bonds;
1470         int err, n;
1471
1472         if (get_user(idx, &ext->virtual_index))
1473                 return -EFAULT;
1474
1475         if (idx >= set->engines->num_engines) {
1476                 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1477                           idx, set->engines->num_engines);
1478                 return -EINVAL;
1479         }
1480
1481         idx = array_index_nospec(idx, set->engines->num_engines);
1482         if (!set->engines->engines[idx]) {
1483                 DRM_DEBUG("Invalid engine at %d\n", idx);
1484                 return -EINVAL;
1485         }
1486         virtual = set->engines->engines[idx]->engine;
1487
1488         err = check_user_mbz(&ext->flags);
1489         if (err)
1490                 return err;
1491
1492         for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1493                 err = check_user_mbz(&ext->mbz64[n]);
1494                 if (err)
1495                         return err;
1496         }
1497
1498         if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1499                 return -EFAULT;
1500
1501         master = intel_engine_lookup_user(set->ctx->i915,
1502                                           ci.engine_class, ci.engine_instance);
1503         if (!master) {
1504                 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1505                           ci.engine_class, ci.engine_instance);
1506                 return -EINVAL;
1507         }
1508
1509         if (get_user(num_bonds, &ext->num_bonds))
1510                 return -EFAULT;
1511
1512         for (n = 0; n < num_bonds; n++) {
1513                 struct intel_engine_cs *bond;
1514
1515                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1516                         return -EFAULT;
1517
1518                 bond = intel_engine_lookup_user(set->ctx->i915,
1519                                                 ci.engine_class,
1520                                                 ci.engine_instance);
1521                 if (!bond) {
1522                         DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1523                                   n, ci.engine_class, ci.engine_instance);
1524                         return -EINVAL;
1525                 }
1526
1527                 /*
1528                  * A non-virtual engine has no siblings to choose between; and
1529                  * a submit fence will always be directed to the one engine.
1530                  */
1531                 if (intel_engine_is_virtual(virtual)) {
1532                         err = intel_virtual_engine_attach_bond(virtual,
1533                                                                master,
1534                                                                bond);
1535                         if (err)
1536                                 return err;
1537                 }
1538         }
1539
1540         return 0;
1541 }
1542
1543 static const i915_user_extension_fn set_engines__extensions[] = {
1544         [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1545         [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1546 };
1547
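/*
 * Replace the context's engines[] map with the user-supplied list (a zero
 * size reverts to the default map), process the load-balance and bond
 * extensions, and publish the new map under engines_mutex; the previous
 * map is freed after an RCU grace period.
 */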
1548 static int
1549 set_engines(struct i915_gem_context *ctx,
1550             const struct drm_i915_gem_context_param *args)
1551 {
1552         struct i915_context_param_engines __user *user =
1553                 u64_to_user_ptr(args->value);
1554         struct set_engines set = { .ctx = ctx };
1555         unsigned int num_engines, n;
1556         u64 extensions;
1557         int err;
1558
1559         if (!args->size) { /* switch back to legacy user_ring_map */
1560                 if (!i915_gem_context_user_engines(ctx))
1561                         return 0;
1562
1563                 set.engines = default_engines(ctx);
1564                 if (IS_ERR(set.engines))
1565                         return PTR_ERR(set.engines);
1566
1567                 goto replace;
1568         }
1569
1570         BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1571         if (args->size < sizeof(*user) ||
1572             !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1573                 DRM_DEBUG("Invalid size for engine array: %d\n",
1574                           args->size);
1575                 return -EINVAL;
1576         }
1577
1578         /*
1579          * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1580          * first 64 engines defined here.
1581          */
1582         num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1583
1584         set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1585                               GFP_KERNEL);
1586         if (!set.engines)
1587                 return -ENOMEM;
1588
1589         init_rcu_head(&set.engines->rcu);
1590         for (n = 0; n < num_engines; n++) {
1591                 struct i915_engine_class_instance ci;
1592                 struct intel_engine_cs *engine;
1593
1594                 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1595                         __free_engines(set.engines, n);
1596                         return -EFAULT;
1597                 }
1598
1599                 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1600                     ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1601                         set.engines->engines[n] = NULL;
1602                         continue;
1603                 }
1604
1605                 engine = intel_engine_lookup_user(ctx->i915,
1606                                                   ci.engine_class,
1607                                                   ci.engine_instance);
1608                 if (!engine) {
1609                         DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1610                                   n, ci.engine_class, ci.engine_instance);
1611                         __free_engines(set.engines, n);
1612                         return -ENOENT;
1613                 }
1614
1615                 set.engines->engines[n] = intel_context_create(ctx, engine);
1616                 if (IS_ERR_OR_NULL(set.engines->engines[n])) {
1617                         __free_engines(set.engines, n);
1618                         return -ENOMEM;
1619                 }
1620         }
1621         set.engines->num_engines = num_engines;
1622
1623         err = -EFAULT;
1624         if (!get_user(extensions, &user->extensions))
1625                 err = i915_user_extensions(u64_to_user_ptr(extensions),
1626                                            set_engines__extensions,
1627                                            ARRAY_SIZE(set_engines__extensions),
1628                                            &set);
1629         if (err) {
1630                 free_engines(set.engines);
1631                 return err;
1632         }
1633
1634 replace:
1635         mutex_lock(&ctx->engines_mutex);
1636         if (args->size)
1637                 i915_gem_context_set_user_engines(ctx);
1638         else
1639                 i915_gem_context_clear_user_engines(ctx);
1640         rcu_swap_protected(ctx->engines, set.engines, 1);
1641         mutex_unlock(&ctx->engines_mutex);
1642
1643         call_rcu(&set.engines->rcu, free_engines_rcu);
1644
1645         return 0;
1646 }
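
/*
 * A minimal userspace sketch of programming an explicit engine map, assuming
 * an open DRM fd and a context id from context create (error handling
 * omitted):
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.engines = {
 *			{ I915_ENGINE_CLASS_RENDER, 0 },
 *			{ I915_ENGINE_CLASS_COPY, 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * Setting args->size to zero, as handled above, reverts the context to the
 * legacy ring map.
 */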
1647
1648 static struct i915_gem_engines *
1649 __copy_engines(struct i915_gem_engines *e)
1650 {
1651         struct i915_gem_engines *copy;
1652         unsigned int n;
1653
1654         copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1655         if (!copy)
1656                 return ERR_PTR(-ENOMEM);
1657
1658         init_rcu_head(&copy->rcu);
1659         for (n = 0; n < e->num_engines; n++) {
1660                 if (e->engines[n])
1661                         copy->engines[n] = intel_context_get(e->engines[n]);
1662                 else
1663                         copy->engines[n] = NULL;
1664         }
1665         copy->num_engines = n;
1666
1667         return copy;
1668 }
1669
1670 static int
1671 get_engines(struct i915_gem_context *ctx,
1672             struct drm_i915_gem_context_param *args)
1673 {
1674         struct i915_context_param_engines __user *user;
1675         struct i915_gem_engines *e;
1676         size_t n, count, size;
1677         int err = 0;
1678
1679         err = mutex_lock_interruptible(&ctx->engines_mutex);
1680         if (err)
1681                 return err;
1682
1683         e = NULL;
1684         if (i915_gem_context_user_engines(ctx))
1685                 e = __copy_engines(i915_gem_context_engines(ctx));
1686         mutex_unlock(&ctx->engines_mutex);
1687         if (IS_ERR_OR_NULL(e)) {
1688                 args->size = 0;
1689                 return PTR_ERR_OR_ZERO(e);
1690         }
1691
1692         count = e->num_engines;
1693
1694         /* Be paranoid in case we have an impedance mismatch */
1695         if (!check_struct_size(user, engines, count, &size)) {
1696                 err = -EINVAL;
1697                 goto err_free;
1698         }
1699         if (overflows_type(size, args->size)) {
1700                 err = -EINVAL;
1701                 goto err_free;
1702         }
1703
1704         if (!args->size) {
1705                 args->size = size;
1706                 goto err_free;
1707         }
1708
1709         if (args->size < size) {
1710                 err = -EINVAL;
1711                 goto err_free;
1712         }
1713
1714         user = u64_to_user_ptr(args->value);
1715         if (!access_ok(user, size)) {
1716                 err = -EFAULT;
1717                 goto err_free;
1718         }
1719
1720         if (put_user(0, &user->extensions)) {
1721                 err = -EFAULT;
1722                 goto err_free;
1723         }
1724
1725         for (n = 0; n < count; n++) {
1726                 struct i915_engine_class_instance ci = {
1727                         .engine_class = I915_ENGINE_CLASS_INVALID,
1728                         .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1729                 };
1730
1731                 if (e->engines[n]) {
1732                         ci.engine_class = e->engines[n]->engine->uabi_class;
1733                         ci.engine_instance = e->engines[n]->engine->uabi_instance;
1734                 }
1735
1736                 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1737                         err = -EFAULT;
1738                         goto err_free;
1739                 }
1740         }
1741
1742         args->size = size;
1743
1744 err_free:
1745         free_engines(e);
1746         return err;
1747 }
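
/*
 * get_engines() follows the usual probe-then-fill pattern: a call with
 * size == 0 reports the number of bytes required, and a second call with a
 * large enough buffer receives the class:instance pairs (holes are returned
 * as I915_ENGINE_CLASS_INVALID). A rough userspace sketch, with fd and
 * ctx_id hypothetical and error handling omitted:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	p.value = (uintptr_t)malloc(p.size);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 */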
1748
1749 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1750                         struct i915_gem_context *ctx,
1751                         struct drm_i915_gem_context_param *args)
1752 {
1753         int ret = 0;
1754
1755         switch (args->param) {
1756         case I915_CONTEXT_PARAM_NO_ZEROMAP:
1757                 if (args->size)
1758                         ret = -EINVAL;
1759                 else if (args->value)
1760                         set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1761                 else
1762                         clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1763                 break;
1764
1765         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1766                 if (args->size)
1767                         ret = -EINVAL;
1768                 else if (args->value)
1769                         i915_gem_context_set_no_error_capture(ctx);
1770                 else
1771                         i915_gem_context_clear_no_error_capture(ctx);
1772                 break;
1773
1774         case I915_CONTEXT_PARAM_BANNABLE:
1775                 if (args->size)
1776                         ret = -EINVAL;
1777                 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1778                         ret = -EPERM;
1779                 else if (args->value)
1780                         i915_gem_context_set_bannable(ctx);
1781                 else
1782                         i915_gem_context_clear_bannable(ctx);
1783                 break;
1784
1785         case I915_CONTEXT_PARAM_RECOVERABLE:
1786                 if (args->size)
1787                         ret = -EINVAL;
1788                 else if (args->value)
1789                         i915_gem_context_set_recoverable(ctx);
1790                 else
1791                         i915_gem_context_clear_recoverable(ctx);
1792                 break;
1793
1794         case I915_CONTEXT_PARAM_PRIORITY:
1795                 {
1796                         s64 priority = args->value;
1797
1798                         if (args->size)
1799                                 ret = -EINVAL;
1800                         else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1801                                 ret = -ENODEV;
1802                         else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1803                                  priority < I915_CONTEXT_MIN_USER_PRIORITY)
1804                                 ret = -EINVAL;
1805                         else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1806                                  !capable(CAP_SYS_NICE))
1807                                 ret = -EPERM;
1808                         else
1809                                 ctx->sched.priority =
1810                                         I915_USER_PRIORITY(priority);
1811                 }
1812                 break;
1813
1814         case I915_CONTEXT_PARAM_SSEU:
1815                 ret = set_sseu(ctx, args);
1816                 break;
1817
1818         case I915_CONTEXT_PARAM_VM:
1819                 ret = set_ppgtt(fpriv, ctx, args);
1820                 break;
1821
1822         case I915_CONTEXT_PARAM_ENGINES:
1823                 ret = set_engines(ctx, args);
1824                 break;
1825
1826         case I915_CONTEXT_PARAM_BAN_PERIOD:
1827         default:
1828                 ret = -EINVAL;
1829                 break;
1830         }
1831
1832         return ret;
1833 }
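
/*
 * Most parameters above are scalars: args->size must be zero and args->value
 * carries the new setting (SSEU and ENGINES instead treat value as a user
 * pointer). An illustrative sketch of lowering a context's priority, with fd
 * and ctx_id hypothetical; raising it above the default requires
 * CAP_SYS_NICE:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = I915_CONTEXT_MIN_USER_PRIORITY,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */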
1834
1835 struct create_ext {
1836         struct i915_gem_context *ctx;
1837         struct drm_i915_file_private *fpriv;
1838 };
1839
1840 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1841 {
1842         struct drm_i915_gem_context_create_ext_setparam local;
1843         const struct create_ext *arg = data;
1844
1845         if (copy_from_user(&local, ext, sizeof(local)))
1846                 return -EFAULT;
1847
1848         if (local.param.ctx_id)
1849                 return -EINVAL;
1850
1851         return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1852 }
1853
1854 static int clone_engines(struct i915_gem_context *dst,
1855                          struct i915_gem_context *src)
1856 {
1857         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1858         struct i915_gem_engines *clone;
1859         bool user_engines;
1860         unsigned long n;
1861
1862         clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1863         if (!clone)
1864                 goto err_unlock;
1865
1866         init_rcu_head(&clone->rcu);
1867         for (n = 0; n < e->num_engines; n++) {
1868                 struct intel_engine_cs *engine;
1869
1870                 if (!e->engines[n]) {
1871                         clone->engines[n] = NULL;
1872                         continue;
1873                 }
1874                 engine = e->engines[n]->engine;
1875
1876                 /*
1877                  * Virtual engines are singletons; they can only exist
1878                  * inside a single context, because they embed their
1879                  * HW context... As each virtual context implies a single
1880                  * timeline (each engine can only dequeue a single request
1881                  * at any time), it would be surprising for two contexts
1882                  * to use the same engine. So let's create a copy of
1883                  * the virtual engine instead.
1884                  */
1885                 if (intel_engine_is_virtual(engine))
1886                         clone->engines[n] =
1887                                 intel_execlists_clone_virtual(dst, engine);
1888                 else
1889                         clone->engines[n] = intel_context_create(dst, engine);
1890                 if (IS_ERR_OR_NULL(clone->engines[n])) {
1891                         __free_engines(clone, n);
1892                         goto err_unlock;
1893                 }
1894         }
1895         clone->num_engines = n;
1896
1897         user_engines = i915_gem_context_user_engines(src);
1898         i915_gem_context_unlock_engines(src);
1899
1900         free_engines(dst->engines);
1901         RCU_INIT_POINTER(dst->engines, clone);
1902         if (user_engines)
1903                 i915_gem_context_set_user_engines(dst);
1904         else
1905                 i915_gem_context_clear_user_engines(dst);
1906         return 0;
1907
1908 err_unlock:
1909         i915_gem_context_unlock_engines(src);
1910         return -ENOMEM;
1911 }
1912
1913 static int clone_flags(struct i915_gem_context *dst,
1914                        struct i915_gem_context *src)
1915 {
1916         dst->user_flags = src->user_flags;
1917         return 0;
1918 }
1919
1920 static int clone_schedattr(struct i915_gem_context *dst,
1921                            struct i915_gem_context *src)
1922 {
1923         dst->sched = src->sched;
1924         return 0;
1925 }
1926
1927 static int clone_sseu(struct i915_gem_context *dst,
1928                       struct i915_gem_context *src)
1929 {
1930         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1931         struct i915_gem_engines *clone;
1932         unsigned long n;
1933         int err;
1934
1935         clone = dst->engines; /* no locking required; sole access */
1936         if (e->num_engines != clone->num_engines) {
1937                 err = -EINVAL;
1938                 goto unlock;
1939         }
1940
1941         for (n = 0; n < e->num_engines; n++) {
1942                 struct intel_context *ce = e->engines[n];
1943
1944                 if (clone->engines[n]->engine->class != ce->engine->class) {
1945                         /* Must have compatible engine maps! */
1946                         err = -EINVAL;
1947                         goto unlock;
1948                 }
1949
1950                 /* serialises with set_sseu */
1951                 err = intel_context_lock_pinned(ce);
1952                 if (err)
1953                         goto unlock;
1954
1955                 clone->engines[n]->sseu = ce->sseu;
1956                 intel_context_unlock_pinned(ce);
1957         }
1958
1959         err = 0;
1960 unlock:
1961         i915_gem_context_unlock_engines(src);
1962         return err;
1963 }
1964
1965 static int clone_timeline(struct i915_gem_context *dst,
1966                           struct i915_gem_context *src)
1967 {
1968         if (src->timeline) {
1969                 GEM_BUG_ON(src->timeline == dst->timeline);
1970
1971                 if (dst->timeline)
1972                         intel_timeline_put(dst->timeline);
1973                 dst->timeline = intel_timeline_get(src->timeline);
1974         }
1975
1976         return 0;
1977 }
1978
1979 static int clone_vm(struct i915_gem_context *dst,
1980                     struct i915_gem_context *src)
1981 {
1982         struct i915_address_space *vm;
1983
1984         rcu_read_lock();
1985         do {
1986                 vm = READ_ONCE(src->vm);
1987                 if (!vm)
1988                         break;
1989
1990                 if (!kref_get_unless_zero(&vm->ref))
1991                         continue;
1992
1993                 /*
1994                  * This ppgtt may have been reallocated between
1995                  * the read and the kref, and reassigned to a third
1996                  * context. In order to avoid inadvertent sharing
1997                  * of this ppgtt with that third context (and not
1998                  * src), we have to confirm that we have the same
1999                  * ppgtt after passing through the strong memory
2000                  * barrier implied by a successful
2001                  * kref_get_unless_zero().
2002                  *
2003                  * Once we have acquired the current ppgtt of src,
2004                  * we no longer care if it is released from src, as
2005                  * it cannot be reallocated elsewhere.
2006                  */
2007
2008                 if (vm == READ_ONCE(src->vm))
2009                         break;
2010
2011                 i915_vm_put(vm);
2012         } while (1);
2013         rcu_read_unlock();
2014
2015         if (vm) {
2016                 __assign_ppgtt(dst, vm);
2017                 i915_vm_put(vm);
2018         }
2019
2020         return 0;
2021 }
2022
2023 static int create_clone(struct i915_user_extension __user *ext, void *data)
2024 {
2025         static int (* const fn[])(struct i915_gem_context *dst,
2026                                   struct i915_gem_context *src) = {
2027 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2028                 MAP(ENGINES, clone_engines),
2029                 MAP(FLAGS, clone_flags),
2030                 MAP(SCHEDATTR, clone_schedattr),
2031                 MAP(SSEU, clone_sseu),
2032                 MAP(TIMELINE, clone_timeline),
2033                 MAP(VM, clone_vm),
2034 #undef MAP
2035         };
2036         struct drm_i915_gem_context_create_ext_clone local;
2037         const struct create_ext *arg = data;
2038         struct i915_gem_context *dst = arg->ctx;
2039         struct i915_gem_context *src;
2040         int err, bit;
2041
2042         if (copy_from_user(&local, ext, sizeof(local)))
2043                 return -EFAULT;
2044
2045         BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2046                      I915_CONTEXT_CLONE_UNKNOWN);
2047
2048         if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2049                 return -EINVAL;
2050
2051         if (local.rsvd)
2052                 return -EINVAL;
2053
2054         rcu_read_lock();
2055         src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2056         rcu_read_unlock();
2057         if (!src)
2058                 return -ENOENT;
2059
2060         GEM_BUG_ON(src == dst);
2061
2062         for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2063                 if (!(local.flags & BIT(bit)))
2064                         continue;
2065
2066                 err = fn[bit](dst, src);
2067                 if (err)
2068                         return err;
2069         }
2070
2071         return 0;
2072 }
2073
2074 static const i915_user_extension_fn create_extensions[] = {
2075         [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2076         [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2077 };
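
/*
 * The create extensions allow a context to be configured atomically with its
 * creation. An illustrative userspace sketch (fd and parent_ctx_id are
 * hypothetical) chaining a CLONE extension behind a SETPARAM extension:
 *
 *	struct drm_i915_gem_context_create_ext_clone clone = {
 *		.base.name = I915_CONTEXT_CREATE_EXT_CLONE,
 *		.clone_id = parent_ctx_id,
 *		.flags = I915_CONTEXT_CLONE_ENGINES | I915_CONTEXT_CLONE_VM,
 *	};
 *	struct drm_i915_gem_context_create_ext_setparam prio = {
 *		.base = {
 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
 *			.next_extension = (uintptr_t)&clone,
 *		},
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = I915_CONTEXT_MIN_USER_PRIORITY,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&prio,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	new_ctx_id = create.ctx_id;
 */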
2078
2079 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2080 {
2081         return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2082 }
2083
2084 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2085                                   struct drm_file *file)
2086 {
2087         struct drm_i915_private *i915 = to_i915(dev);
2088         struct drm_i915_gem_context_create_ext *args = data;
2089         struct create_ext ext_data;
2090         int ret;
2091
2092         if (!DRIVER_CAPS(i915)->has_logical_contexts)
2093                 return -ENODEV;
2094
2095         if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2096                 return -EINVAL;
2097
2098         ret = intel_gt_terminally_wedged(&i915->gt);
2099         if (ret)
2100                 return ret;
2101
2102         ext_data.fpriv = file->driver_priv;
2103         if (client_is_banned(ext_data.fpriv)) {
2104                 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2105                           current->comm,
2106                           task_pid_nr(current));
2107                 return -EIO;
2108         }
2109
2110         ret = i915_mutex_lock_interruptible(dev);
2111         if (ret)
2112                 return ret;
2113
2114         ext_data.ctx = i915_gem_create_context(i915, args->flags);
2115         mutex_unlock(&dev->struct_mutex);
2116         if (IS_ERR(ext_data.ctx))
2117                 return PTR_ERR(ext_data.ctx);
2118
2119         if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2120                 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2121                                            create_extensions,
2122                                            ARRAY_SIZE(create_extensions),
2123                                            &ext_data);
2124                 if (ret)
2125                         goto err_ctx;
2126         }
2127
2128         ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
2129         if (ret < 0)
2130                 goto err_ctx;
2131
2132         args->ctx_id = ret;
2133         DRM_DEBUG("HW context %d created\n", args->ctx_id);
2134
2135         return 0;
2136
2137 err_ctx:
2138         context_close(ext_data.ctx);
2139         return ret;
2140 }
2141
2142 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2143                                    struct drm_file *file)
2144 {
2145         struct drm_i915_gem_context_destroy *args = data;
2146         struct drm_i915_file_private *file_priv = file->driver_priv;
2147         struct i915_gem_context *ctx;
2148
2149         if (args->pad != 0)
2150                 return -EINVAL;
2151
2152         if (!args->ctx_id)
2153                 return -ENOENT;
2154
2155         if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2156                 return -EINTR;
2157
2158         ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2159         mutex_unlock(&file_priv->context_idr_lock);
2160         if (!ctx)
2161                 return -ENOENT;
2162
2163         context_close(ctx);
2164         return 0;
2165 }
2166
2167 static int get_sseu(struct i915_gem_context *ctx,
2168                     struct drm_i915_gem_context_param *args)
2169 {
2170         struct drm_i915_gem_context_param_sseu user_sseu;
2171         struct intel_context *ce;
2172         unsigned long lookup;
2173         int err;
2174
2175         if (args->size == 0)
2176                 goto out;
2177         else if (args->size < sizeof(user_sseu))
2178                 return -EINVAL;
2179
2180         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2181                            sizeof(user_sseu)))
2182                 return -EFAULT;
2183
2184         if (user_sseu.rsvd)
2185                 return -EINVAL;
2186
2187         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2188                 return -EINVAL;
2189
2190         lookup = 0;
2191         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2192                 lookup |= LOOKUP_USER_INDEX;
2193
2194         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2195         if (IS_ERR(ce))
2196                 return PTR_ERR(ce);
2197
2198         err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2199         if (err) {
2200                 intel_context_put(ce);
2201                 return err;
2202         }
2203
2204         user_sseu.slice_mask = ce->sseu.slice_mask;
2205         user_sseu.subslice_mask = ce->sseu.subslice_mask;
2206         user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2207         user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2208
2209         intel_context_unlock_pinned(ce);
2210         intel_context_put(ce);
2211
2212         if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2213                          sizeof(user_sseu)))
2214                 return -EFAULT;
2215
2216 out:
2217         args->size = sizeof(user_sseu);
2218
2219         return 0;
2220 }
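
/*
 * An illustrative userspace sketch of querying the SSEU configuration of the
 * render engine (fd and ctx_id hypothetical):
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *
 * On return, sseu.slice_mask and friends describe the current configuration.
 */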
2221
2222 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2223                                     struct drm_file *file)
2224 {
2225         struct drm_i915_file_private *file_priv = file->driver_priv;
2226         struct drm_i915_gem_context_param *args = data;
2227         struct i915_gem_context *ctx;
2228         int ret = 0;
2229
2230         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2231         if (!ctx)
2232                 return -ENOENT;
2233
2234         switch (args->param) {
2235         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2236                 args->size = 0;
2237                 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2238                 break;
2239
2240         case I915_CONTEXT_PARAM_GTT_SIZE:
2241                 args->size = 0;
2242                 if (ctx->vm)
2243                         args->value = ctx->vm->total;
2244                 else if (to_i915(dev)->ggtt.alias)
2245                         args->value = to_i915(dev)->ggtt.alias->vm.total;
2246                 else
2247                         args->value = to_i915(dev)->ggtt.vm.total;
2248                 break;
2249
2250         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2251                 args->size = 0;
2252                 args->value = i915_gem_context_no_error_capture(ctx);
2253                 break;
2254
2255         case I915_CONTEXT_PARAM_BANNABLE:
2256                 args->size = 0;
2257                 args->value = i915_gem_context_is_bannable(ctx);
2258                 break;
2259
2260         case I915_CONTEXT_PARAM_RECOVERABLE:
2261                 args->size = 0;
2262                 args->value = i915_gem_context_is_recoverable(ctx);
2263                 break;
2264
2265         case I915_CONTEXT_PARAM_PRIORITY:
2266                 args->size = 0;
2267                 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2268                 break;
2269
2270         case I915_CONTEXT_PARAM_SSEU:
2271                 ret = get_sseu(ctx, args);
2272                 break;
2273
2274         case I915_CONTEXT_PARAM_VM:
2275                 ret = get_ppgtt(file_priv, ctx, args);
2276                 break;
2277
2278         case I915_CONTEXT_PARAM_ENGINES:
2279                 ret = get_engines(ctx, args);
2280                 break;
2281
2282         case I915_CONTEXT_PARAM_BAN_PERIOD:
2283         default:
2284                 ret = -EINVAL;
2285                 break;
2286         }
2287
2288         i915_gem_context_put(ctx);
2289         return ret;
2290 }
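
/*
 * Scalar parameters are returned directly in args->value. For example, a
 * rough sketch of reading the total GTT size available to this context (fd
 * and ctx_id hypothetical):
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	gtt_size = p.value;
 */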
2291
2292 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2293                                     struct drm_file *file)
2294 {
2295         struct drm_i915_file_private *file_priv = file->driver_priv;
2296         struct drm_i915_gem_context_param *args = data;
2297         struct i915_gem_context *ctx;
2298         int ret;
2299
2300         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2301         if (!ctx)
2302                 return -ENOENT;
2303
2304         ret = ctx_setparam(file_priv, ctx, args);
2305
2306         i915_gem_context_put(ctx);
2307         return ret;
2308 }
2309
2310 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2311                                        void *data, struct drm_file *file)
2312 {
2313         struct drm_i915_private *dev_priv = to_i915(dev);
2314         struct drm_i915_reset_stats *args = data;
2315         struct i915_gem_context *ctx;
2316         int ret;
2317
2318         if (args->flags || args->pad)
2319                 return -EINVAL;
2320
2321         ret = -ENOENT;
2322         rcu_read_lock();
2323         ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2324         if (!ctx)
2325                 goto out;
2326
2327         /*
2328          * We opt for unserialised reads here. This may result in tearing
2329          * in the extremely unlikely event of a GPU hang on this context
2330          * while we are querying its stats. If we need that extra layer of
2331          * protection, we should wrap the hangstats with a seqlock.
2332          */
2333
2334         if (capable(CAP_SYS_ADMIN))
2335                 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2336         else
2337                 args->reset_count = 0;
2338
2339         args->batch_active = atomic_read(&ctx->guilty_count);
2340         args->batch_pending = atomic_read(&ctx->active_count);
2341
2342         ret = 0;
2343 out:
2344         rcu_read_unlock();
2345         return ret;
2346 }
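
/*
 * An illustrative userspace sketch of polling a context's hang statistics
 * (fd and ctx_id hypothetical); reset_count is reported as zero without
 * CAP_SYS_ADMIN:
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *	if (stats.batch_active)
 *		fprintf(stderr, "context caused a GPU hang\n");
 */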
2347
2348 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2349 {
2350         struct drm_i915_private *i915 = ctx->i915;
2351         int err = 0;
2352
2353         mutex_lock(&i915->contexts.mutex);
2354
2355         GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2356
2357         if (list_empty(&ctx->hw_id_link)) {
2358                 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2359
2360                 err = assign_hw_id(i915, &ctx->hw_id);
2361                 if (err)
2362                         goto out_unlock;
2363
2364                 list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2365         }
2366
2367         GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2368         atomic_inc(&ctx->hw_id_pin_count);
2369
2370 out_unlock:
2371         mutex_unlock(&i915->contexts.mutex);
2372         return err;
2373 }
2374
2375 /* GEM context-engines iterator: for_each_gem_engine() */
2376 struct intel_context *
2377 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2378 {
2379         const struct i915_gem_engines *e = it->engines;
2380         struct intel_context *ce;
2381
2382         do {
2383                 if (it->idx >= e->num_engines)
2384                         return NULL;
2385
2386                 ce = e->engines[it->idx++];
2387         } while (!ce);
2388
2389         return ce;
2390 }
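
/*
 * A minimal sketch of consuming the iterator via for_each_gem_engine() from
 * i915_gem_context.h; do_something() is a placeholder:
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 *		do_something(ce);
 *	i915_gem_context_unlock_engines(ctx);
 */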
2391
2392 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2393 #include "selftests/mock_context.c"
2394 #include "selftests/i915_gem_context.c"
2395 #endif
2396
2397 static void i915_global_gem_context_shrink(void)
2398 {
2399         kmem_cache_shrink(global.slab_luts);
2400 }
2401
2402 static void i915_global_gem_context_exit(void)
2403 {
2404         kmem_cache_destroy(global.slab_luts);
2405 }
2406
2407 static struct i915_global_gem_context global = { {
2408         .shrink = i915_global_gem_context_shrink,
2409         .exit = i915_global_gem_context_exit,
2410 } };
2411
2412 int __init i915_global_gem_context_init(void)
2413 {
2414         global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2415         if (!global.slab_luts)
2416                 return -ENOMEM;
2417
2418         i915_global_register(&global.base);
2419         return 0;
2420 }