drivers/gpu/drm/i915/gem/i915_gem_context.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and exits
11  * RC6 (the GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU some offset to load as the
21  * current to invoke a save of the context we actually care about. In fact, the
22  * code could likely be constructed, albeit in a more complicated fashion, to
23  * never use the default context, though that limits the driver's ability to
24  * swap out, and/or destroy other contexts.
25  *
26  * All other contexts are created as a request by the GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to not re-emit state (and
28  * potentially query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware and object
33  * tracking work. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: other clients submit execbufs with their contexts
47  * S3->S1: context object was retired
48  * S3->S2: the client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context" is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  * possible to destroy a context while it is still active.
64  *
65  */
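
/*
 * Illustrative sketch, not driver code: how a userspace client is expected
 * to obtain and release one of these contexts through the uAPI implemented
 * further down in this file. The DRM fd handling and error paths are
 * assumptions kept deliberately brief; the real structures live in
 * include/uapi/drm/i915_drm.h.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static __u32 example_context_create(int drm_fd)
 *	{
 *		struct drm_i915_gem_context_create arg = {};
 *
 *		// A non-zero id is returned on success; id 0 always names
 *		// the default context described above.
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &arg))
 *			return 0;
 *		return arg.ctx_id;
 *	}
 *
 *	static void example_context_destroy(int drm_fd, __u32 ctx_id)
 *	{
 *		struct drm_i915_gem_context_destroy arg = { .ctx_id = ctx_id };
 *
 *		ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &arg);
 *	}
 */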
66
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69
70 #include <drm/i915_drm.h>
71
72 #include "gt/intel_lrc_reg.h"
73
74 #include "i915_gem_context.h"
75 #include "i915_globals.h"
76 #include "i915_trace.h"
77 #include "i915_user_extensions.h"
78
79 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
80
81 static struct i915_global_gem_context {
82         struct i915_global base;
83         struct kmem_cache *slab_luts;
84 } global;
85
86 struct i915_lut_handle *i915_lut_handle_alloc(void)
87 {
88         return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
89 }
90
91 void i915_lut_handle_free(struct i915_lut_handle *lut)
92 {
93         kmem_cache_free(global.slab_luts, lut);
94 }
95
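/*
 * lut_close() tears down this context's handle -> vma lookup table: every
 * entry in ctx->handles_vma is removed from the owning object's lut_list,
 * the radix tree slot is deleted, and the references the LUT held on the
 * vma and object are dropped. The caller must hold ctx->mutex.
 */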
96 static void lut_close(struct i915_gem_context *ctx)
97 {
98         struct radix_tree_iter iter;
99         void __rcu **slot;
100
101         lockdep_assert_held(&ctx->mutex);
102
103         rcu_read_lock();
104         radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
105                 struct i915_vma *vma = rcu_dereference_raw(*slot);
106                 struct drm_i915_gem_object *obj = vma->obj;
107                 struct i915_lut_handle *lut;
108
109                 if (!kref_get_unless_zero(&obj->base.refcount))
110                         continue;
111
112                 rcu_read_unlock();
113                 i915_gem_object_lock(obj);
114                 list_for_each_entry(lut, &obj->lut_list, obj_link) {
115                         if (lut->ctx != ctx)
116                                 continue;
117
118                         if (lut->handle != iter.index)
119                                 continue;
120
121                         list_del(&lut->obj_link);
122                         break;
123                 }
124                 i915_gem_object_unlock(obj);
125                 rcu_read_lock();
126
127                 if (&lut->obj_link != &obj->lut_list) {
128                         i915_lut_handle_free(lut);
129                         radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
130                         if (atomic_dec_and_test(&vma->open_count) &&
131                             !i915_vma_is_ggtt(vma))
132                                 i915_vma_close(vma);
133                         i915_gem_object_put(obj);
134                 }
135
136                 i915_gem_object_put(obj);
137         }
138         rcu_read_unlock();
139 }
140
141 static struct intel_context *
142 lookup_user_engine(struct i915_gem_context *ctx,
143                    unsigned long flags,
144                    const struct i915_engine_class_instance *ci)
145 #define LOOKUP_USER_INDEX BIT(0)
146 {
147         int idx;
148
149         if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
150                 return ERR_PTR(-EINVAL);
151
152         if (!i915_gem_context_user_engines(ctx)) {
153                 struct intel_engine_cs *engine;
154
155                 engine = intel_engine_lookup_user(ctx->i915,
156                                                   ci->engine_class,
157                                                   ci->engine_instance);
158                 if (!engine)
159                         return ERR_PTR(-EINVAL);
160
161                 idx = engine->id;
162         } else {
163                 idx = ci->engine_instance;
164         }
165
166         return i915_gem_context_get_engine(ctx, idx);
167 }
168
169 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
170 {
171         unsigned int max;
172
173         lockdep_assert_held(&i915->contexts.mutex);
174
175         if (INTEL_GEN(i915) >= 11)
176                 max = GEN11_MAX_CONTEXT_HW_ID;
177         else if (USES_GUC_SUBMISSION(i915))
178                 /*
179                  * When using GuC in proxy submission, GuC consumes the
180                  * highest bit in the context id to indicate proxy submission.
181                  */
182                 max = MAX_GUC_CONTEXT_HW_ID;
183         else
184                 max = MAX_CONTEXT_HW_ID;
185
186         return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
187 }
188
189 static int steal_hw_id(struct drm_i915_private *i915)
190 {
191         struct i915_gem_context *ctx, *cn;
192         LIST_HEAD(pinned);
193         int id = -ENOSPC;
194
195         lockdep_assert_held(&i915->contexts.mutex);
196
197         list_for_each_entry_safe(ctx, cn,
198                                  &i915->contexts.hw_id_list, hw_id_link) {
199                 if (atomic_read(&ctx->hw_id_pin_count)) {
200                         list_move_tail(&ctx->hw_id_link, &pinned);
201                         continue;
202                 }
203
204                 GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
205                 list_del_init(&ctx->hw_id_link);
206                 id = ctx->hw_id;
207                 break;
208         }
209
210         /*
211          * Remember how far we got on the last repossession scan, so the
212          * list is kept in a "least recently scanned" order.
213          */
214         list_splice_tail(&pinned, &i915->contexts.hw_id_list);
215         return id;
216 }
217
218 static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
219 {
220         int ret;
221
222         lockdep_assert_held(&i915->contexts.mutex);
223
224         /*
225          * We prefer to steal/stall ourselves and our users over that of the
226          * entire system. That may be a little unfair to our users, and
227          * even hurt high priority clients. The choice is whether to oomkill
228          * something else, or steal a context id.
229          */
230         ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
231         if (unlikely(ret < 0)) {
232                 ret = steal_hw_id(i915);
233                 if (ret < 0) /* once again for the correct errno code */
234                         ret = new_hw_id(i915, GFP_KERNEL);
235                 if (ret < 0)
236                         return ret;
237         }
238
239         *out = ret;
240         return 0;
241 }
242
243 static void release_hw_id(struct i915_gem_context *ctx)
244 {
245         struct drm_i915_private *i915 = ctx->i915;
246
247         if (list_empty(&ctx->hw_id_link))
248                 return;
249
250         mutex_lock(&i915->contexts.mutex);
251         if (!list_empty(&ctx->hw_id_link)) {
252                 ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
253                 list_del_init(&ctx->hw_id_link);
254         }
255         mutex_unlock(&i915->contexts.mutex);
256 }
257
258 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
259 {
260         while (count--) {
261                 if (!e->engines[count])
262                         continue;
263
264                 intel_context_put(e->engines[count]);
265         }
266         kfree(e);
267 }
268
269 static void free_engines(struct i915_gem_engines *e)
270 {
271         __free_engines(e, e->num_engines);
272 }
273
274 static void free_engines_rcu(struct rcu_head *rcu)
275 {
276         free_engines(container_of(rcu, struct i915_gem_engines, rcu));
277 }
278
279 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
280 {
281         struct intel_engine_cs *engine;
282         struct i915_gem_engines *e;
283         enum intel_engine_id id;
284
285         e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
286         if (!e)
287                 return ERR_PTR(-ENOMEM);
288
289         init_rcu_head(&e->rcu);
290         for_each_engine(engine, ctx->i915, id) {
291                 struct intel_context *ce;
292
293                 ce = intel_context_create(ctx, engine);
294                 if (IS_ERR(ce)) {
295                         __free_engines(e, id);
296                         return ERR_CAST(ce);
297                 }
298
299                 e->engines[id] = ce;
300         }
301         e->num_engines = id;
302
303         return e;
304 }
305
306 static void i915_gem_context_free(struct i915_gem_context *ctx)
307 {
308         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
309         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
310
311         release_hw_id(ctx);
312         if (ctx->vm)
313                 i915_vm_put(ctx->vm);
314
315         free_engines(rcu_access_pointer(ctx->engines));
316         mutex_destroy(&ctx->engines_mutex);
317
318         if (ctx->timeline)
319                 intel_timeline_put(ctx->timeline);
320
321         kfree(ctx->name);
322         put_pid(ctx->pid);
323
324         list_del(&ctx->link);
325         mutex_destroy(&ctx->mutex);
326
327         kfree_rcu(ctx, rcu);
328 }
329
330 static void contexts_free(struct drm_i915_private *i915)
331 {
332         struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
333         struct i915_gem_context *ctx, *cn;
334
335         lockdep_assert_held(&i915->drm.struct_mutex);
336
337         llist_for_each_entry_safe(ctx, cn, freed, free_link)
338                 i915_gem_context_free(ctx);
339 }
340
341 static void contexts_free_first(struct drm_i915_private *i915)
342 {
343         struct i915_gem_context *ctx;
344         struct llist_node *freed;
345
346         lockdep_assert_held(&i915->drm.struct_mutex);
347
348         freed = llist_del_first(&i915->contexts.free_list);
349         if (!freed)
350                 return;
351
352         ctx = container_of(freed, typeof(*ctx), free_link);
353         i915_gem_context_free(ctx);
354 }
355
356 static void contexts_free_worker(struct work_struct *work)
357 {
358         struct drm_i915_private *i915 =
359                 container_of(work, typeof(*i915), contexts.free_work);
360
361         mutex_lock(&i915->drm.struct_mutex);
362         contexts_free(i915);
363         mutex_unlock(&i915->drm.struct_mutex);
364 }
365
366 void i915_gem_context_release(struct kref *ref)
367 {
368         struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
369         struct drm_i915_private *i915 = ctx->i915;
370
371         trace_i915_context_free(ctx);
372         if (llist_add(&ctx->free_link, &i915->contexts.free_list))
373                 queue_work(i915->wq, &i915->contexts.free_work);
374 }
375
376 static void context_close(struct i915_gem_context *ctx)
377 {
378         mutex_lock(&ctx->mutex);
379
380         i915_gem_context_set_closed(ctx);
381         ctx->file_priv = ERR_PTR(-EBADF);
382
383         /*
384          * This context will never again be assigned to HW, so we can
385          * reuse its ID for the next context.
386          */
387         release_hw_id(ctx);
388
389         /*
390          * The LUT uses the VMA as a backpointer to unref the object,
391          * so we need to clear the LUT before we close all the VMA (inside
392          * the ppgtt).
393          */
394         lut_close(ctx);
395
396         mutex_unlock(&ctx->mutex);
397         i915_gem_context_put(ctx);
398 }
399
400 static u32 default_desc_template(const struct drm_i915_private *i915,
401                                  const struct i915_address_space *vm)
402 {
403         u32 address_mode;
404         u32 desc;
405
406         desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
407
408         address_mode = INTEL_LEGACY_32B_CONTEXT;
409         if (vm && i915_vm_is_4lvl(vm))
410                 address_mode = INTEL_LEGACY_64B_CONTEXT;
411         desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
412
413         if (IS_GEN(i915, 8))
414                 desc |= GEN8_CTX_L3LLC_COHERENT;
415
416         /* TODO: WaDisableLiteRestore when we start using semaphore
417          * signalling between Command Streamers
418          * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
419          */
420
421         return desc;
422 }
423
424 static struct i915_gem_context *
425 __create_context(struct drm_i915_private *i915)
426 {
427         struct i915_gem_context *ctx;
428         struct i915_gem_engines *e;
429         int err;
430         int i;
431
432         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
433         if (!ctx)
434                 return ERR_PTR(-ENOMEM);
435
436         kref_init(&ctx->ref);
437         list_add_tail(&ctx->link, &i915->contexts.list);
438         ctx->i915 = i915;
439         ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
440         mutex_init(&ctx->mutex);
441
442         mutex_init(&ctx->engines_mutex);
443         e = default_engines(ctx);
444         if (IS_ERR(e)) {
445                 err = PTR_ERR(e);
446                 goto err_free;
447         }
448         RCU_INIT_POINTER(ctx->engines, e);
449
450         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
451         INIT_LIST_HEAD(&ctx->hw_id_link);
452
453         /* NB: Mark all slices as needing a remap so that when the context first
454          * loads it will restore whatever remap state already exists. If there
455          * is no remap info, it will be a NOP. */
456         ctx->remap_slice = ALL_L3_SLICES(i915);
457
458         i915_gem_context_set_bannable(ctx);
459         i915_gem_context_set_recoverable(ctx);
460
461         ctx->ring_size = 4 * PAGE_SIZE;
462         ctx->desc_template =
463                 default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm);
464
465         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
466                 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
467
468         return ctx;
469
470 err_free:
471         kfree(ctx);
472         return ERR_PTR(err);
473 }
474
475 static struct i915_address_space *
476 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
477 {
478         struct i915_address_space *old = ctx->vm;
479
480         ctx->vm = i915_vm_get(vm);
481         ctx->desc_template = default_desc_template(ctx->i915, vm);
482
483         return old;
484 }
485
486 static void __assign_ppgtt(struct i915_gem_context *ctx,
487                            struct i915_address_space *vm)
488 {
489         if (vm == ctx->vm)
490                 return;
491
492         vm = __set_ppgtt(ctx, vm);
493         if (vm)
494                 i915_vm_put(vm);
495 }
496
497 static struct i915_gem_context *
498 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
499 {
500         struct i915_gem_context *ctx;
501
502         lockdep_assert_held(&dev_priv->drm.struct_mutex);
503
504         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
505             !HAS_EXECLISTS(dev_priv))
506                 return ERR_PTR(-EINVAL);
507
508         /* Reap the most stale context */
509         contexts_free_first(dev_priv);
510
511         ctx = __create_context(dev_priv);
512         if (IS_ERR(ctx))
513                 return ctx;
514
515         if (HAS_FULL_PPGTT(dev_priv)) {
516                 struct i915_ppgtt *ppgtt;
517
518                 ppgtt = i915_ppgtt_create(dev_priv);
519                 if (IS_ERR(ppgtt)) {
520                         DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
521                                          PTR_ERR(ppgtt));
522                         context_close(ctx);
523                         return ERR_CAST(ppgtt);
524                 }
525
526                 __assign_ppgtt(ctx, &ppgtt->vm);
527                 i915_vm_put(&ppgtt->vm);
528         }
529
530         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
531                 struct intel_timeline *timeline;
532
533                 timeline = intel_timeline_create(&dev_priv->gt, NULL);
534                 if (IS_ERR(timeline)) {
535                         context_close(ctx);
536                         return ERR_CAST(timeline);
537                 }
538
539                 ctx->timeline = timeline;
540         }
541
542         trace_i915_context_create(ctx);
543
544         return ctx;
545 }
546
547 /**
548  * i915_gem_context_create_gvt - create a GVT GEM context
549  * @dev: drm device *
550  *
551  * This function is used to create a GVT specific GEM context.
552  *
553  * Returns:
554  * pointer to i915_gem_context on success, error pointer if failed
555  *
556  */
557 struct i915_gem_context *
558 i915_gem_context_create_gvt(struct drm_device *dev)
559 {
560         struct i915_gem_context *ctx;
561         int ret;
562
563         if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
564                 return ERR_PTR(-ENODEV);
565
566         ret = i915_mutex_lock_interruptible(dev);
567         if (ret)
568                 return ERR_PTR(ret);
569
570         ctx = i915_gem_create_context(to_i915(dev), 0);
571         if (IS_ERR(ctx))
572                 goto out;
573
574         ret = i915_gem_context_pin_hw_id(ctx);
575         if (ret) {
576                 context_close(ctx);
577                 ctx = ERR_PTR(ret);
578                 goto out;
579         }
580
581         ctx->file_priv = ERR_PTR(-EBADF);
582         i915_gem_context_set_closed(ctx); /* not user accessible */
583         i915_gem_context_clear_bannable(ctx);
584         i915_gem_context_set_force_single_submission(ctx);
585         if (!USES_GUC_SUBMISSION(to_i915(dev)))
586                 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
587
588         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
589 out:
590         mutex_unlock(&dev->struct_mutex);
591         return ctx;
592 }
593
594 static void
595 destroy_kernel_context(struct i915_gem_context **ctxp)
596 {
597         struct i915_gem_context *ctx;
598
599         /* Keep the context ref so that we can free it immediately ourselves */
600         ctx = i915_gem_context_get(fetch_and_zero(ctxp));
601         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
602
603         context_close(ctx);
604         i915_gem_context_free(ctx);
605 }
606
607 struct i915_gem_context *
608 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
609 {
610         struct i915_gem_context *ctx;
611         int err;
612
613         ctx = i915_gem_create_context(i915, 0);
614         if (IS_ERR(ctx))
615                 return ctx;
616
617         err = i915_gem_context_pin_hw_id(ctx);
618         if (err) {
619                 destroy_kernel_context(&ctx);
620                 return ERR_PTR(err);
621         }
622
623         i915_gem_context_clear_bannable(ctx);
624         ctx->sched.priority = I915_USER_PRIORITY(prio);
625         ctx->ring_size = PAGE_SIZE;
626
627         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
628
629         return ctx;
630 }
631
632 static void init_contexts(struct drm_i915_private *i915)
633 {
634         mutex_init(&i915->contexts.mutex);
635         INIT_LIST_HEAD(&i915->contexts.list);
636
637         /* Using the simple ida interface, the max is limited by sizeof(int) */
638         BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
639         BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
640         ida_init(&i915->contexts.hw_ida);
641         INIT_LIST_HEAD(&i915->contexts.hw_id_list);
642
643         INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
644         init_llist_head(&i915->contexts.free_list);
645 }
646
647 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
648 {
649         struct i915_gem_context *ctx;
650
651         /* Reassure ourselves we are only called once */
652         GEM_BUG_ON(dev_priv->kernel_context);
653
654         init_contexts(dev_priv);
655
656         /* lowest priority; idle task */
657         ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
658         if (IS_ERR(ctx)) {
659                 DRM_ERROR("Failed to create default global context\n");
660                 return PTR_ERR(ctx);
661         }
662         /*
663          * For easy recognisability, we want the kernel context to be 0 and then
664          * all user contexts will have non-zero hw_id. Kernel contexts are
665          * permanently pinned, so that we never suffer a stall and can
666          * use them from any allocation context (e.g. for evicting other
667          * contexts and from inside the shrinker).
668          */
669         GEM_BUG_ON(ctx->hw_id);
670         GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
671         dev_priv->kernel_context = ctx;
672
673         DRM_DEBUG_DRIVER("%s context support initialized\n",
674                          DRIVER_CAPS(dev_priv)->has_logical_contexts ?
675                          "logical" : "fake");
676         return 0;
677 }
678
679 void i915_gem_contexts_fini(struct drm_i915_private *i915)
680 {
681         lockdep_assert_held(&i915->drm.struct_mutex);
682
683         destroy_kernel_context(&i915->kernel_context);
684
685         /* Must free all deferred contexts (via flush_workqueue) first */
686         GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
687         ida_destroy(&i915->contexts.hw_ida);
688 }
689
690 static int context_idr_cleanup(int id, void *p, void *data)
691 {
692         context_close(p);
693         return 0;
694 }
695
696 static int vm_idr_cleanup(int id, void *p, void *data)
697 {
698         i915_vm_put(p);
699         return 0;
700 }
701
702 static int gem_context_register(struct i915_gem_context *ctx,
703                                 struct drm_i915_file_private *fpriv)
704 {
705         int ret;
706
707         ctx->file_priv = fpriv;
708         if (ctx->vm)
709                 ctx->vm->file = fpriv;
710
711         ctx->pid = get_task_pid(current, PIDTYPE_PID);
712         ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
713                               current->comm, pid_nr(ctx->pid));
714         if (!ctx->name) {
715                 ret = -ENOMEM;
716                 goto err_pid;
717         }
718
719         /* And finally expose ourselves to userspace via the idr */
720         mutex_lock(&fpriv->context_idr_lock);
721         ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
722         mutex_unlock(&fpriv->context_idr_lock);
723         if (ret >= 0)
724                 goto out;
725
726         kfree(fetch_and_zero(&ctx->name));
727 err_pid:
728         put_pid(fetch_and_zero(&ctx->pid));
729 out:
730         return ret;
731 }
732
733 int i915_gem_context_open(struct drm_i915_private *i915,
734                           struct drm_file *file)
735 {
736         struct drm_i915_file_private *file_priv = file->driver_priv;
737         struct i915_gem_context *ctx;
738         int err;
739
740         mutex_init(&file_priv->context_idr_lock);
741         mutex_init(&file_priv->vm_idr_lock);
742
743         idr_init(&file_priv->context_idr);
744         idr_init_base(&file_priv->vm_idr, 1);
745
746         mutex_lock(&i915->drm.struct_mutex);
747         ctx = i915_gem_create_context(i915, 0);
748         mutex_unlock(&i915->drm.struct_mutex);
749         if (IS_ERR(ctx)) {
750                 err = PTR_ERR(ctx);
751                 goto err;
752         }
753
754         err = gem_context_register(ctx, file_priv);
755         if (err < 0)
756                 goto err_ctx;
757
758         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
759         GEM_BUG_ON(err > 0);
760
761         return 0;
762
763 err_ctx:
764         context_close(ctx);
765 err:
766         idr_destroy(&file_priv->vm_idr);
767         idr_destroy(&file_priv->context_idr);
768         mutex_destroy(&file_priv->vm_idr_lock);
769         mutex_destroy(&file_priv->context_idr_lock);
770         return err;
771 }
772
773 void i915_gem_context_close(struct drm_file *file)
774 {
775         struct drm_i915_file_private *file_priv = file->driver_priv;
776
777         idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
778         idr_destroy(&file_priv->context_idr);
779         mutex_destroy(&file_priv->context_idr_lock);
780
781         idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
782         idr_destroy(&file_priv->vm_idr);
783         mutex_destroy(&file_priv->vm_idr_lock);
784 }
785
786 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
787                              struct drm_file *file)
788 {
789         struct drm_i915_private *i915 = to_i915(dev);
790         struct drm_i915_gem_vm_control *args = data;
791         struct drm_i915_file_private *file_priv = file->driver_priv;
792         struct i915_ppgtt *ppgtt;
793         int err;
794
795         if (!HAS_FULL_PPGTT(i915))
796                 return -ENODEV;
797
798         if (args->flags)
799                 return -EINVAL;
800
801         ppgtt = i915_ppgtt_create(i915);
802         if (IS_ERR(ppgtt))
803                 return PTR_ERR(ppgtt);
804
805         ppgtt->vm.file = file_priv;
806
807         if (args->extensions) {
808                 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
809                                            NULL, 0,
810                                            ppgtt);
811                 if (err)
812                         goto err_put;
813         }
814
815         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
816         if (err)
817                 goto err_put;
818
819         err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
820         if (err < 0)
821                 goto err_unlock;
822
823         GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
824
825         mutex_unlock(&file_priv->vm_idr_lock);
826
827         args->vm_id = err;
828         return 0;
829
830 err_unlock:
831         mutex_unlock(&file_priv->vm_idr_lock);
832 err_put:
833         i915_vm_put(&ppgtt->vm);
834         return err;
835 }
836
837 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
838                               struct drm_file *file)
839 {
840         struct drm_i915_file_private *file_priv = file->driver_priv;
841         struct drm_i915_gem_vm_control *args = data;
842         struct i915_address_space *vm;
843         int err;
844         u32 id;
845
846         if (args->flags)
847                 return -EINVAL;
848
849         if (args->extensions)
850                 return -EINVAL;
851
852         id = args->vm_id;
853         if (!id)
854                 return -ENOENT;
855
856         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
857         if (err)
858                 return err;
859
860         vm = idr_remove(&file_priv->vm_idr, id);
861
862         mutex_unlock(&file_priv->vm_idr_lock);
863         if (!vm)
864                 return -ENOENT;
865
866         i915_vm_put(vm);
867         return 0;
868 }
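
/*
 * Illustrative sketch, not driver code: the userspace pairing of the two VM
 * ioctls above. A separate ppGTT address space is created, its id kept for
 * later use (e.g. I915_CONTEXT_PARAM_VM), then released. The drm_fd and
 * error handling are assumptions made for brevity.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static __u32 example_vm_create(int drm_fd)
 *	{
 *		struct drm_i915_gem_vm_control ctl = {};
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
 *			return 0; // 0 is never a valid vm_id (the idr base is 1)
 *		return ctl.vm_id;
 *	}
 *
 *	static void example_vm_destroy(int drm_fd, __u32 vm_id)
 *	{
 *		struct drm_i915_gem_vm_control ctl = { .vm_id = vm_id };
 *
 *		ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
 *	}
 */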
869
870 struct context_barrier_task {
871         struct i915_active base;
872         void (*task)(void *data);
873         void *data;
874 };
875
876 static void cb_retire(struct i915_active *base)
877 {
878         struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
879
880         if (cb->task)
881                 cb->task(cb->data);
882
883         i915_active_fini(&cb->base);
884         kfree(cb);
885 }
886
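/*
 * context_barrier_task() emits a barrier request on each engine of @ctx
 * selected by @engines (optionally filtered by @skip and extended with
 * commands by @emit), then invokes @task once every one of those requests
 * has retired. set_ppgtt() below relies on this to defer releasing the old
 * ppGTT until no in-flight request can still be using it.
 */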
887 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
888 static int context_barrier_task(struct i915_gem_context *ctx,
889                                 intel_engine_mask_t engines,
890                                 bool (*skip)(struct intel_context *ce, void *data),
891                                 int (*emit)(struct i915_request *rq, void *data),
892                                 void (*task)(void *data),
893                                 void *data)
894 {
895         struct drm_i915_private *i915 = ctx->i915;
896         struct context_barrier_task *cb;
897         struct i915_gem_engines_iter it;
898         struct intel_context *ce;
899         int err = 0;
900
901         lockdep_assert_held(&i915->drm.struct_mutex);
902         GEM_BUG_ON(!task);
903
904         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
905         if (!cb)
906                 return -ENOMEM;
907
908         i915_active_init(i915, &cb->base, NULL, cb_retire);
909         err = i915_active_acquire(&cb->base);
910         if (err) {
911                 kfree(cb);
912                 return err;
913         }
914
915         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
916                 struct i915_request *rq;
917
918                 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
919                                        ce->engine->mask)) {
920                         err = -ENXIO;
921                         break;
922                 }
923
924                 if (!(ce->engine->mask & engines))
925                         continue;
926
927                 if (skip && skip(ce, data))
928                         continue;
929
930                 rq = intel_context_create_request(ce);
931                 if (IS_ERR(rq)) {
932                         err = PTR_ERR(rq);
933                         break;
934                 }
935
936                 err = 0;
937                 if (emit)
938                         err = emit(rq, data);
939                 if (err == 0)
940                         err = i915_active_ref(&cb->base, rq->fence.context, rq);
941
942                 i915_request_add(rq);
943                 if (err)
944                         break;
945         }
946         i915_gem_context_unlock_engines(ctx);
947
948         cb->task = err ? NULL : task; /* caller needs to unwind instead */
949         cb->data = data;
950
951         i915_active_release(&cb->base);
952
953         return err;
954 }
955
956 static int get_ppgtt(struct drm_i915_file_private *file_priv,
957                      struct i915_gem_context *ctx,
958                      struct drm_i915_gem_context_param *args)
959 {
960         struct i915_address_space *vm;
961         int ret;
962
963         if (!ctx->vm)
964                 return -ENODEV;
965
966         /* XXX rcu acquire? */
967         ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
968         if (ret)
969                 return ret;
970
971         vm = i915_vm_get(ctx->vm);
972         mutex_unlock(&ctx->i915->drm.struct_mutex);
973
974         ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
975         if (ret)
976                 goto err_put;
977
978         ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
979         GEM_BUG_ON(!ret);
980         if (ret < 0)
981                 goto err_unlock;
982
983         i915_vm_get(vm);
984
985         args->size = 0;
986         args->value = ret;
987
988         ret = 0;
989 err_unlock:
990         mutex_unlock(&file_priv->vm_idr_lock);
991 err_put:
992         i915_vm_put(vm);
993         return ret;
994 }
995
996 static void set_ppgtt_barrier(void *data)
997 {
998         struct i915_address_space *old = data;
999
1000         if (INTEL_GEN(old->i915) < 8)
1001                 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
1002
1003         i915_vm_put(old);
1004 }
1005
1006 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1007 {
1008         struct i915_address_space *vm = rq->gem_context->vm;
1009         struct intel_engine_cs *engine = rq->engine;
1010         u32 base = engine->mmio_base;
1011         u32 *cs;
1012         int i;
1013
1014         if (i915_vm_is_4lvl(vm)) {
1015                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1016                 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1017
1018                 cs = intel_ring_begin(rq, 6);
1019                 if (IS_ERR(cs))
1020                         return PTR_ERR(cs);
1021
1022                 *cs++ = MI_LOAD_REGISTER_IMM(2);
1023
1024                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1025                 *cs++ = upper_32_bits(pd_daddr);
1026                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1027                 *cs++ = lower_32_bits(pd_daddr);
1028
1029                 *cs++ = MI_NOOP;
1030                 intel_ring_advance(rq, cs);
1031         } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1032                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1033
1034                 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1035                 if (IS_ERR(cs))
1036                         return PTR_ERR(cs);
1037
1038                 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1039                 for (i = GEN8_3LVL_PDPES; i--; ) {
1040                         const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1041
1042                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1043                         *cs++ = upper_32_bits(pd_daddr);
1044                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1045                         *cs++ = lower_32_bits(pd_daddr);
1046                 }
1047                 *cs++ = MI_NOOP;
1048                 intel_ring_advance(rq, cs);
1049         } else {
1050                 /* ppGTT is not part of the legacy context image */
1051                 gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
1052         }
1053
1054         return 0;
1055 }
1056
1057 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1058 {
1059         if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1060                 return !ce->state;
1061         else
1062                 return !atomic_read(&ce->pin_count);
1063 }
1064
1065 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1066                      struct i915_gem_context *ctx,
1067                      struct drm_i915_gem_context_param *args)
1068 {
1069         struct i915_address_space *vm, *old;
1070         int err;
1071
1072         if (args->size)
1073                 return -EINVAL;
1074
1075         if (!ctx->vm)
1076                 return -ENODEV;
1077
1078         if (upper_32_bits(args->value))
1079                 return -ENOENT;
1080
1081         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1082         if (err)
1083                 return err;
1084
1085         vm = idr_find(&file_priv->vm_idr, args->value);
1086         if (vm)
1087                 i915_vm_get(vm);
1088         mutex_unlock(&file_priv->vm_idr_lock);
1089         if (!vm)
1090                 return -ENOENT;
1091
1092         err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1093         if (err)
1094                 goto out;
1095
1096         if (vm == ctx->vm)
1097                 goto unlock;
1098
1099         /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1100         mutex_lock(&ctx->mutex);
1101         lut_close(ctx);
1102         mutex_unlock(&ctx->mutex);
1103
1104         old = __set_ppgtt(ctx, vm);
1105
1106         /*
1107          * We need to flush any requests using the current ppgtt before
1108          * we release it as the requests do not hold a reference themselves,
1109          * only indirectly through the context.
1110          */
1111         err = context_barrier_task(ctx, ALL_ENGINES,
1112                                    skip_ppgtt_update,
1113                                    emit_ppgtt_update,
1114                                    set_ppgtt_barrier,
1115                                    old);
1116         if (err) {
1117                 ctx->vm = old;
1118                 ctx->desc_template = default_desc_template(ctx->i915, old);
1119                 i915_vm_put(vm);
1120         }
1121
1122 unlock:
1123         mutex_unlock(&ctx->i915->drm.struct_mutex);
1124
1125 out:
1126         i915_vm_put(vm);
1127         return err;
1128 }
1129
1130 static int gen8_emit_rpcs_config(struct i915_request *rq,
1131                                  struct intel_context *ce,
1132                                  struct intel_sseu sseu)
1133 {
1134         u64 offset;
1135         u32 *cs;
1136
1137         cs = intel_ring_begin(rq, 4);
1138         if (IS_ERR(cs))
1139                 return PTR_ERR(cs);
1140
1141         offset = i915_ggtt_offset(ce->state) +
1142                  LRC_STATE_PN * PAGE_SIZE +
1143                  (CTX_R_PWR_CLK_STATE + 1) * 4;
1144
1145         *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1146         *cs++ = lower_32_bits(offset);
1147         *cs++ = upper_32_bits(offset);
1148         *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1149
1150         intel_ring_advance(rq, cs);
1151
1152         return 0;
1153 }
1154
1155 static int
1156 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1157 {
1158         struct i915_request *rq;
1159         int ret;
1160
1161         lockdep_assert_held(&ce->pin_mutex);
1162
1163         /*
1164          * If the context is not idle, we have to submit an ordered request to
1165          * modify its context image via the kernel context (writing to our own
1166          * image, or into the registers directly, does not stick). Pristine
1167          * and idle contexts will be configured on pinning.
1168          */
1169         if (!intel_context_is_pinned(ce))
1170                 return 0;
1171
1172         rq = i915_request_create(ce->engine->kernel_context);
1173         if (IS_ERR(rq))
1174                 return PTR_ERR(rq);
1175
1176         /* Queue this switch after all other activity by this context. */
1177         ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
1178         if (ret)
1179                 goto out_add;
1180
1181         /*
1182          * Guarantee context image and the timeline remains pinned until the
1183          * modifying request is retired by setting the ce activity tracker.
1184          *
1185          * But we only need to take one pin on account of it; in other words,
1186          * we transfer the pinned ce object to the tracked active request.
1187          */
1188         GEM_BUG_ON(i915_active_is_idle(&ce->active));
1189         ret = i915_active_ref(&ce->active, rq->fence.context, rq);
1190         if (ret)
1191                 goto out_add;
1192
1193         ret = gen8_emit_rpcs_config(rq, ce, sseu);
1194
1195 out_add:
1196         i915_request_add(rq);
1197         return ret;
1198 }
1199
1200 static int
1201 __intel_context_reconfigure_sseu(struct intel_context *ce,
1202                                  struct intel_sseu sseu)
1203 {
1204         int ret;
1205
1206         GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
1207
1208         ret = intel_context_lock_pinned(ce);
1209         if (ret)
1210                 return ret;
1211
1212         /* Nothing to do if unmodified. */
1213         if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1214                 goto unlock;
1215
1216         ret = gen8_modify_rpcs(ce, sseu);
1217         if (!ret)
1218                 ce->sseu = sseu;
1219
1220 unlock:
1221         intel_context_unlock_pinned(ce);
1222         return ret;
1223 }
1224
1225 static int
1226 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1227 {
1228         struct drm_i915_private *i915 = ce->gem_context->i915;
1229         int ret;
1230
1231         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1232         if (ret)
1233                 return ret;
1234
1235         ret = __intel_context_reconfigure_sseu(ce, sseu);
1236
1237         mutex_unlock(&i915->drm.struct_mutex);
1238
1239         return ret;
1240 }
1241
1242 static int
1243 user_to_context_sseu(struct drm_i915_private *i915,
1244                      const struct drm_i915_gem_context_param_sseu *user,
1245                      struct intel_sseu *context)
1246 {
1247         const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1248
1249         /* No zeros in any field. */
1250         if (!user->slice_mask || !user->subslice_mask ||
1251             !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1252                 return -EINVAL;
1253
1254         /* Max > min. */
1255         if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1256                 return -EINVAL;
1257
1258         /*
1259          * Some future proofing on the types since the uAPI is wider than the
1260          * current internal implementation.
1261          */
1262         if (overflows_type(user->slice_mask, context->slice_mask) ||
1263             overflows_type(user->subslice_mask, context->subslice_mask) ||
1264             overflows_type(user->min_eus_per_subslice,
1265                            context->min_eus_per_subslice) ||
1266             overflows_type(user->max_eus_per_subslice,
1267                            context->max_eus_per_subslice))
1268                 return -EINVAL;
1269
1270         /* Check validity against hardware. */
1271         if (user->slice_mask & ~device->slice_mask)
1272                 return -EINVAL;
1273
1274         if (user->subslice_mask & ~device->subslice_mask[0])
1275                 return -EINVAL;
1276
1277         if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1278                 return -EINVAL;
1279
1280         context->slice_mask = user->slice_mask;
1281         context->subslice_mask = user->subslice_mask;
1282         context->min_eus_per_subslice = user->min_eus_per_subslice;
1283         context->max_eus_per_subslice = user->max_eus_per_subslice;
1284
1285         /* Part specific restrictions. */
1286         if (IS_GEN(i915, 11)) {
1287                 unsigned int hw_s = hweight8(device->slice_mask);
1288                 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1289                 unsigned int req_s = hweight8(context->slice_mask);
1290                 unsigned int req_ss = hweight8(context->subslice_mask);
1291
1292                 /*
1293                  * Only full subslice enablement is possible if more than one
1294                  * slice is turned on.
1295                  */
1296                 if (req_s > 1 && req_ss != hw_ss_per_s)
1297                         return -EINVAL;
1298
1299                 /*
1300                  * If more than four (SScount bitfield limit) subslices are
1301                  * requested then the number has to be even.
1302                  */
1303                 if (req_ss > 4 && (req_ss & 1))
1304                         return -EINVAL;
1305
1306                 /*
1307                  * If only one slice is enabled and subslice count is below the
1308          * device full enablement, it must be at most half of all the
1309          * available subslices.
1310                  */
1311                 if (req_s == 1 && req_ss < hw_ss_per_s &&
1312                     req_ss > (hw_ss_per_s / 2))
1313                         return -EINVAL;
1314
1315                 /* ABI restriction - VME use case only. */
1316
1317                 /* All slices or one slice only. */
1318                 if (req_s != 1 && req_s != hw_s)
1319                         return -EINVAL;
1320
1321                 /*
1322                  * Half subslices or full enablement only when one slice is
1323                  * enabled.
1324                  */
1325                 if (req_s == 1 &&
1326                     (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1327                         return -EINVAL;
1328
1329                 /* No EU configuration changes. */
1330                 if ((user->min_eus_per_subslice !=
1331                      device->max_eus_per_subslice) ||
1332                     (user->max_eus_per_subslice !=
1333                      device->max_eus_per_subslice))
1334                         return -EINVAL;
1335         }
1336
1337         return 0;
1338 }
1339
1340 static int set_sseu(struct i915_gem_context *ctx,
1341                     struct drm_i915_gem_context_param *args)
1342 {
1343         struct drm_i915_private *i915 = ctx->i915;
1344         struct drm_i915_gem_context_param_sseu user_sseu;
1345         struct intel_context *ce;
1346         struct intel_sseu sseu;
1347         unsigned long lookup;
1348         int ret;
1349
1350         if (args->size < sizeof(user_sseu))
1351                 return -EINVAL;
1352
1353         if (!IS_GEN(i915, 11))
1354                 return -ENODEV;
1355
1356         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1357                            sizeof(user_sseu)))
1358                 return -EFAULT;
1359
1360         if (user_sseu.rsvd)
1361                 return -EINVAL;
1362
1363         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1364                 return -EINVAL;
1365
1366         lookup = 0;
1367         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1368                 lookup |= LOOKUP_USER_INDEX;
1369
1370         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1371         if (IS_ERR(ce))
1372                 return PTR_ERR(ce);
1373
1374         /* Only render engine supports RPCS configuration. */
1375         if (ce->engine->class != RENDER_CLASS) {
1376                 ret = -ENODEV;
1377                 goto out_ce;
1378         }
1379
1380         ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1381         if (ret)
1382                 goto out_ce;
1383
1384         ret = intel_context_reconfigure_sseu(ce, sseu);
1385         if (ret)
1386                 goto out_ce;
1387
1388         args->size = sizeof(user_sseu);
1389
1390 out_ce:
1391         intel_context_put(ce);
1392         return ret;
1393 }
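
/*
 * Illustrative sketch, not driver code: reaching the SSEU reconfiguration
 * above from userspace via I915_CONTEXT_PARAM_SSEU on the render engine.
 * The mask and EU values are placeholders supplied by the caller and must
 * satisfy the gen11 restrictions checked in user_to_context_sseu().
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int example_set_sseu(int drm_fd, __u32 ctx_id,
 *				    __u64 slice_mask, __u64 subslice_mask,
 *				    __u16 min_eus, __u16 max_eus)
 *	{
 *		struct drm_i915_gem_context_param_sseu sseu = {
 *			.engine = {
 *				.engine_class = I915_ENGINE_CLASS_RENDER,
 *				.engine_instance = 0,
 *			},
 *			.slice_mask = slice_mask,
 *			.subslice_mask = subslice_mask,
 *			.min_eus_per_subslice = min_eus,
 *			.max_eus_per_subslice = max_eus,
 *		};
 *		struct drm_i915_gem_context_param arg = {
 *			.ctx_id = ctx_id,
 *			.param = I915_CONTEXT_PARAM_SSEU,
 *			.size = sizeof(sseu),
 *			.value = (__u64)(uintptr_t)&sseu,
 *		};
 *
 *		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 *	}
 */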
1394
1395 struct set_engines {
1396         struct i915_gem_context *ctx;
1397         struct i915_gem_engines *engines;
1398 };
1399
1400 static int
1401 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1402 {
1403         struct i915_context_engines_load_balance __user *ext =
1404                 container_of_user(base, typeof(*ext), base);
1405         const struct set_engines *set = data;
1406         struct intel_engine_cs *stack[16];
1407         struct intel_engine_cs **siblings;
1408         struct intel_context *ce;
1409         u16 num_siblings, idx;
1410         unsigned int n;
1411         int err;
1412
1413         if (!HAS_EXECLISTS(set->ctx->i915))
1414                 return -ENODEV;
1415
1416         if (USES_GUC_SUBMISSION(set->ctx->i915))
1417                 return -ENODEV; /* not implemented yet */
1418
1419         if (get_user(idx, &ext->engine_index))
1420                 return -EFAULT;
1421
1422         if (idx >= set->engines->num_engines) {
1423                 DRM_DEBUG("Invalid placement value, %d >= %d\n",
1424                           idx, set->engines->num_engines);
1425                 return -EINVAL;
1426         }
1427
1428         idx = array_index_nospec(idx, set->engines->num_engines);
1429         if (set->engines->engines[idx]) {
1430                 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1431                 return -EEXIST;
1432         }
1433
1434         if (get_user(num_siblings, &ext->num_siblings))
1435                 return -EFAULT;
1436
1437         err = check_user_mbz(&ext->flags);
1438         if (err)
1439                 return err;
1440
1441         err = check_user_mbz(&ext->mbz64);
1442         if (err)
1443                 return err;
1444
1445         siblings = stack;
1446         if (num_siblings > ARRAY_SIZE(stack)) {
1447                 siblings = kmalloc_array(num_siblings,
1448                                          sizeof(*siblings),
1449                                          GFP_KERNEL);
1450                 if (!siblings)
1451                         return -ENOMEM;
1452         }
1453
1454         for (n = 0; n < num_siblings; n++) {
1455                 struct i915_engine_class_instance ci;
1456
1457                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1458                         err = -EFAULT;
1459                         goto out_siblings;
1460                 }
1461
1462                 siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1463                                                        ci.engine_class,
1464                                                        ci.engine_instance);
1465                 if (!siblings[n]) {
1466                         DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1467                                   n, ci.engine_class, ci.engine_instance);
1468                         err = -EINVAL;
1469                         goto out_siblings;
1470                 }
1471         }
1472
1473         ce = intel_execlists_create_virtual(set->ctx, siblings, n);
1474         if (IS_ERR(ce)) {
1475                 err = PTR_ERR(ce);
1476                 goto out_siblings;
1477         }
1478
1479         if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1480                 intel_context_put(ce);
1481                 err = -EEXIST;
1482                 goto out_siblings;
1483         }
1484
1485 out_siblings:
1486         if (siblings != stack)
1487                 kfree(siblings);
1488
1489         return err;
1490 }
1491
1492 static int
1493 set_engines__bond(struct i915_user_extension __user *base, void *data)
1494 {
1495         struct i915_context_engines_bond __user *ext =
1496                 container_of_user(base, typeof(*ext), base);
1497         const struct set_engines *set = data;
1498         struct i915_engine_class_instance ci;
1499         struct intel_engine_cs *virtual;
1500         struct intel_engine_cs *master;
1501         u16 idx, num_bonds;
1502         int err, n;
1503
1504         if (get_user(idx, &ext->virtual_index))
1505                 return -EFAULT;
1506
1507         if (idx >= set->engines->num_engines) {
1508                 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1509                           idx, set->engines->num_engines);
1510                 return -EINVAL;
1511         }
1512
1513         idx = array_index_nospec(idx, set->engines->num_engines);
1514         if (!set->engines->engines[idx]) {
1515                 DRM_DEBUG("Invalid engine at %d\n", idx);
1516                 return -EINVAL;
1517         }
1518         virtual = set->engines->engines[idx]->engine;
1519
1520         err = check_user_mbz(&ext->flags);
1521         if (err)
1522                 return err;
1523
1524         for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1525                 err = check_user_mbz(&ext->mbz64[n]);
1526                 if (err)
1527                         return err;
1528         }
1529
1530         if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1531                 return -EFAULT;
1532
1533         master = intel_engine_lookup_user(set->ctx->i915,
1534                                           ci.engine_class, ci.engine_instance);
1535         if (!master) {
1536                 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1537                           ci.engine_class, ci.engine_instance);
1538                 return -EINVAL;
1539         }
1540
1541         if (get_user(num_bonds, &ext->num_bonds))
1542                 return -EFAULT;
1543
1544         for (n = 0; n < num_bonds; n++) {
1545                 struct intel_engine_cs *bond;
1546
1547                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1548                         return -EFAULT;
1549
1550                 bond = intel_engine_lookup_user(set->ctx->i915,
1551                                                 ci.engine_class,
1552                                                 ci.engine_instance);
1553                 if (!bond) {
1554                         DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1555                                   n, ci.engine_class, ci.engine_instance);
1556                         return -EINVAL;
1557                 }
1558
1559                 /*
1560                  * A non-virtual engine has no siblings to choose between; and
1561                  * a submit fence will always be directed to the one engine.
1562                  */
1563                 if (intel_engine_is_virtual(virtual)) {
1564                         err = intel_virtual_engine_attach_bond(virtual,
1565                                                                master,
1566                                                                bond);
1567                         if (err)
1568                                 return err;
1569                 }
1570         }
1571
1572         return 0;
1573 }
1574
1575 static const i915_user_extension_fn set_engines__extensions[] = {
1576         [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1577         [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1578 };
1579
1580 static int
1581 set_engines(struct i915_gem_context *ctx,
1582             const struct drm_i915_gem_context_param *args)
1583 {
1584         struct i915_context_param_engines __user *user =
1585                 u64_to_user_ptr(args->value);
1586         struct set_engines set = { .ctx = ctx };
1587         unsigned int num_engines, n;
1588         u64 extensions;
1589         int err;
1590
1591         if (!args->size) { /* switch back to legacy user_ring_map */
1592                 if (!i915_gem_context_user_engines(ctx))
1593                         return 0;
1594
1595                 set.engines = default_engines(ctx);
1596                 if (IS_ERR(set.engines))
1597                         return PTR_ERR(set.engines);
1598
1599                 goto replace;
1600         }
1601
1602         BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1603         if (args->size < sizeof(*user) ||
1604             !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1605                 DRM_DEBUG("Invalid size for engine array: %d\n",
1606                           args->size);
1607                 return -EINVAL;
1608         }
1609
1610         /*
1611          * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1612          * first 64 engines defined here.
1613          */
1614         num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1615
1616         set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1617                               GFP_KERNEL);
1618         if (!set.engines)
1619                 return -ENOMEM;
1620
1621         init_rcu_head(&set.engines->rcu);
1622         for (n = 0; n < num_engines; n++) {
1623                 struct i915_engine_class_instance ci;
1624                 struct intel_engine_cs *engine;
1625
1626                 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1627                         __free_engines(set.engines, n);
1628                         return -EFAULT;
1629                 }
1630
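                /*
                 * An (INVALID, INVALID_NONE) pair requests a hole in the
                 * map: execbuf will find no engine at this index.
                 */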
1631                 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1632                     ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1633                         set.engines->engines[n] = NULL;
1634                         continue;
1635                 }
1636
1637                 engine = intel_engine_lookup_user(ctx->i915,
1638                                                   ci.engine_class,
1639                                                   ci.engine_instance);
1640                 if (!engine) {
1641                         DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1642                                   n, ci.engine_class, ci.engine_instance);
1643                         __free_engines(set.engines, n);
1644                         return -ENOENT;
1645                 }
1646
1647                 set.engines->engines[n] = intel_context_create(ctx, engine);
1648                 if (!set.engines->engines[n]) {
1649                         __free_engines(set.engines, n);
1650                         return -ENOMEM;
1651                 }
1652         }
1653         set.engines->num_engines = num_engines;
1654
1655         err = -EFAULT;
1656         if (!get_user(extensions, &user->extensions))
1657                 err = i915_user_extensions(u64_to_user_ptr(extensions),
1658                                            set_engines__extensions,
1659                                            ARRAY_SIZE(set_engines__extensions),
1660                                            &set);
1661         if (err) {
1662                 free_engines(set.engines);
1663                 return err;
1664         }
1665
1666 replace:
1667         mutex_lock(&ctx->engines_mutex);
1668         if (args->size)
1669                 i915_gem_context_set_user_engines(ctx);
1670         else
1671                 i915_gem_context_clear_user_engines(ctx);
1672         rcu_swap_protected(ctx->engines, set.engines, 1);
1673         mutex_unlock(&ctx->engines_mutex);
1674
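        /*
         * After the swap above, set.engines points at the old map; defer
         * freeing it until concurrent readers have left their RCU
         * read-side sections.
         */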
1675         call_rcu(&set.engines->rcu, free_engines_rcu);
1676
1677         return 0;
1678 }
1679
1680 static struct i915_gem_engines *
1681 __copy_engines(struct i915_gem_engines *e)
1682 {
1683         struct i915_gem_engines *copy;
1684         unsigned int n;
1685
1686         copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1687         if (!copy)
1688                 return ERR_PTR(-ENOMEM);
1689
1690         init_rcu_head(&copy->rcu);
1691         for (n = 0; n < e->num_engines; n++) {
1692                 if (e->engines[n])
1693                         copy->engines[n] = intel_context_get(e->engines[n]);
1694                 else
1695                         copy->engines[n] = NULL;
1696         }
1697         copy->num_engines = n;
1698
1699         return copy;
1700 }
1701
1702 static int
1703 get_engines(struct i915_gem_context *ctx,
1704             struct drm_i915_gem_context_param *args)
1705 {
1706         struct i915_context_param_engines __user *user;
1707         struct i915_gem_engines *e;
1708         size_t n, count, size;
1709         int err = 0;
1710
1711         err = mutex_lock_interruptible(&ctx->engines_mutex);
1712         if (err)
1713                 return err;
1714
1715         e = NULL;
1716         if (i915_gem_context_user_engines(ctx))
1717                 e = __copy_engines(i915_gem_context_engines(ctx));
1718         mutex_unlock(&ctx->engines_mutex);
1719         if (IS_ERR_OR_NULL(e)) {
1720                 args->size = 0;
1721                 return PTR_ERR_OR_ZERO(e);
1722         }
1723
1724         count = e->num_engines;
1725
1726         /* Be paranoid in case we have an impedance mismatch */
1727         if (!check_struct_size(user, engines, count, &size)) {
1728                 err = -EINVAL;
1729                 goto err_free;
1730         }
1731         if (overflows_type(size, args->size)) {
1732                 err = -EINVAL;
1733                 goto err_free;
1734         }
1735
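        /* A zero size is a request for the required array size */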
1736         if (!args->size) {
1737                 args->size = size;
1738                 goto err_free;
1739         }
1740
1741         if (args->size < size) {
1742                 err = -EINVAL;
1743                 goto err_free;
1744         }
1745
1746         user = u64_to_user_ptr(args->value);
1747         if (!access_ok(user, size)) {
1748                 err = -EFAULT;
1749                 goto err_free;
1750         }
1751
1752         if (put_user(0, &user->extensions)) {
1753                 err = -EFAULT;
1754                 goto err_free;
1755         }
1756
1757         for (n = 0; n < count; n++) {
1758                 struct i915_engine_class_instance ci = {
1759                         .engine_class = I915_ENGINE_CLASS_INVALID,
1760                         .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1761                 };
1762
1763                 if (e->engines[n]) {
1764                         ci.engine_class = e->engines[n]->engine->uabi_class;
1765                         ci.engine_instance = e->engines[n]->engine->instance;
1766                 }
1767
1768                 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1769                         err = -EFAULT;
1770                         goto err_free;
1771                 }
1772         }
1773
1774         args->size = size;
1775
1776 err_free:
1777         free_engines(e);
1778         return err;
1779 }
1780
1781 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1782                         struct i915_gem_context *ctx,
1783                         struct drm_i915_gem_context_param *args)
1784 {
1785         int ret = 0;
1786
1787         switch (args->param) {
1788         case I915_CONTEXT_PARAM_NO_ZEROMAP:
1789                 if (args->size)
1790                         ret = -EINVAL;
1791                 else if (args->value)
1792                         set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1793                 else
1794                         clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1795                 break;
1796
1797         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1798                 if (args->size)
1799                         ret = -EINVAL;
1800                 else if (args->value)
1801                         i915_gem_context_set_no_error_capture(ctx);
1802                 else
1803                         i915_gem_context_clear_no_error_capture(ctx);
1804                 break;
1805
1806         case I915_CONTEXT_PARAM_BANNABLE:
1807                 if (args->size)
1808                         ret = -EINVAL;
1809                 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1810                         ret = -EPERM;
1811                 else if (args->value)
1812                         i915_gem_context_set_bannable(ctx);
1813                 else
1814                         i915_gem_context_clear_bannable(ctx);
1815                 break;
1816
1817         case I915_CONTEXT_PARAM_RECOVERABLE:
1818                 if (args->size)
1819                         ret = -EINVAL;
1820                 else if (args->value)
1821                         i915_gem_context_set_recoverable(ctx);
1822                 else
1823                         i915_gem_context_clear_recoverable(ctx);
1824                 break;
1825
1826         case I915_CONTEXT_PARAM_PRIORITY:
1827                 {
1828                         s64 priority = args->value;
1829
1830                         if (args->size)
1831                                 ret = -EINVAL;
1832                         else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1833                                 ret = -ENODEV;
1834                         else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1835                                  priority < I915_CONTEXT_MIN_USER_PRIORITY)
1836                                 ret = -EINVAL;
1837                         else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1838                                  !capable(CAP_SYS_NICE))
1839                                 ret = -EPERM;
1840                         else
1841                                 ctx->sched.priority =
1842                                         I915_USER_PRIORITY(priority);
1843                 }
1844                 break;
1845
1846         case I915_CONTEXT_PARAM_SSEU:
1847                 ret = set_sseu(ctx, args);
1848                 break;
1849
1850         case I915_CONTEXT_PARAM_VM:
1851                 ret = set_ppgtt(fpriv, ctx, args);
1852                 break;
1853
1854         case I915_CONTEXT_PARAM_ENGINES:
1855                 ret = set_engines(ctx, args);
1856                 break;
1857
1858         case I915_CONTEXT_PARAM_BAN_PERIOD:
1859         default:
1860                 ret = -EINVAL;
1861                 break;
1862         }
1863
1864         return ret;
1865 }
1866
1867 struct create_ext {
1868         struct i915_gem_context *ctx;
1869         struct drm_i915_file_private *fpriv;
1870 };
1871
1872 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1873 {
1874         struct drm_i915_gem_context_create_ext_setparam local;
1875         const struct create_ext *arg = data;
1876
1877         if (copy_from_user(&local, ext, sizeof(local)))
1878                 return -EFAULT;
1879
1880         if (local.param.ctx_id)
1881                 return -EINVAL;
1882
1883         return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1884 }
1885
1886 static int clone_engines(struct i915_gem_context *dst,
1887                          struct i915_gem_context *src)
1888 {
1889         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1890         struct i915_gem_engines *clone;
1891         bool user_engines;
1892         unsigned long n;
1893
1894         clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1895         if (!clone)
1896                 goto err_unlock;
1897
1898         init_rcu_head(&clone->rcu);
1899         for (n = 0; n < e->num_engines; n++) {
1900                 struct intel_engine_cs *engine;
1901
1902                 if (!e->engines[n]) {
1903                         clone->engines[n] = NULL;
1904                         continue;
1905                 }
1906                 engine = e->engines[n]->engine;
1907
1908                 /*
1909                  * Virtual engines are singletons; they can only exist
1910                  * inside a single context, because they embed their
1911                  * HW context... As each virtual context implies a single
1912                  * timeline (each engine can only dequeue a single request
1913                  * at any time), it would be surprising for two contexts
1914                  * to use the same engine. So let's create a copy of
1915                  * the virtual engine instead.
1916                  */
1917                 if (intel_engine_is_virtual(engine))
1918                         clone->engines[n] =
1919                                 intel_execlists_clone_virtual(dst, engine);
1920                 else
1921                         clone->engines[n] = intel_context_create(dst, engine);
1922                 if (IS_ERR_OR_NULL(clone->engines[n])) {
1923                         __free_engines(clone, n);
1924                         goto err_unlock;
1925                 }
1926         }
1927         clone->num_engines = n;
1928
1929         user_engines = i915_gem_context_user_engines(src);
1930         i915_gem_context_unlock_engines(src);
1931
1932         free_engines(dst->engines);
1933         RCU_INIT_POINTER(dst->engines, clone);
1934         if (user_engines)
1935                 i915_gem_context_set_user_engines(dst);
1936         else
1937                 i915_gem_context_clear_user_engines(dst);
1938         return 0;
1939
1940 err_unlock:
1941         i915_gem_context_unlock_engines(src);
1942         return -ENOMEM;
1943 }
1944
1945 static int clone_flags(struct i915_gem_context *dst,
1946                        struct i915_gem_context *src)
1947 {
1948         dst->user_flags = src->user_flags;
1949         return 0;
1950 }
1951
1952 static int clone_schedattr(struct i915_gem_context *dst,
1953                            struct i915_gem_context *src)
1954 {
1955         dst->sched = src->sched;
1956         return 0;
1957 }
1958
1959 static int clone_sseu(struct i915_gem_context *dst,
1960                       struct i915_gem_context *src)
1961 {
1962         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1963         struct i915_gem_engines *clone;
1964         unsigned long n;
1965         int err;
1966
1967         clone = dst->engines; /* no locking required; sole access */
1968         if (e->num_engines != clone->num_engines) {
1969                 err = -EINVAL;
1970                 goto unlock;
1971         }
1972
1973         for (n = 0; n < e->num_engines; n++) {
1974                 struct intel_context *ce = e->engines[n];
1975
1976                 if (clone->engines[n]->engine->class != ce->engine->class) {
1977                         /* Must have compatible engine maps! */
1978                         err = -EINVAL;
1979                         goto unlock;
1980                 }
1981
1982                 /* serialises with set_sseu */
1983                 err = intel_context_lock_pinned(ce);
1984                 if (err)
1985                         goto unlock;
1986
1987                 clone->engines[n]->sseu = ce->sseu;
1988                 intel_context_unlock_pinned(ce);
1989         }
1990
1991         err = 0;
1992 unlock:
1993         i915_gem_context_unlock_engines(src);
1994         return err;
1995 }
1996
1997 static int clone_timeline(struct i915_gem_context *dst,
1998                           struct i915_gem_context *src)
1999 {
2000         if (src->timeline) {
2001                 GEM_BUG_ON(src->timeline == dst->timeline);
2002
2003                 if (dst->timeline)
2004                         intel_timeline_put(dst->timeline);
2005                 dst->timeline = intel_timeline_get(src->timeline);
2006         }
2007
2008         return 0;
2009 }
2010
2011 static int clone_vm(struct i915_gem_context *dst,
2012                     struct i915_gem_context *src)
2013 {
2014         struct i915_address_space *vm;
2015
2016         rcu_read_lock();
2017         do {
2018                 vm = READ_ONCE(src->vm);
2019                 if (!vm)
2020                         break;
2021
2022                 if (!kref_get_unless_zero(&vm->ref))
2023                         continue;
2024
2025                 /*
2026                  * This ppgtt may have been reallocated between
2027                  * the read and the kref, and reassigned to a third
2028                  * context. In order to avoid inadvertent sharing
2029                  * of this ppgtt with that third context (and not
2030                  * src), we have to confirm that we have the same
2031                  * ppgtt after passing through the strong memory
2032                  * barrier implied by a successful
2033                  * kref_get_unless_zero().
2034                  *
2035                  * Once we have acquired the current ppgtt of src,
2036                  * we no longer care if it is released from src, as
2037                  * it cannot be reallocated elsewhere.
2038                  */
2039
2040                 if (vm == READ_ONCE(src->vm))
2041                         break;
2042
2043                 i915_vm_put(vm);
2044         } while (1);
2045         rcu_read_unlock();
2046
2047         if (vm) {
2048                 __assign_ppgtt(dst, vm);
2049                 i915_vm_put(vm);
2050         }
2051
2052         return 0;
2053 }
2054
2055 static int create_clone(struct i915_user_extension __user *ext, void *data)
2056 {
2057         static int (* const fn[])(struct i915_gem_context *dst,
2058                                   struct i915_gem_context *src) = {
2059 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2060                 MAP(ENGINES, clone_engines),
2061                 MAP(FLAGS, clone_flags),
2062                 MAP(SCHEDATTR, clone_schedattr),
2063                 MAP(SSEU, clone_sseu),
2064                 MAP(TIMELINE, clone_timeline),
2065                 MAP(VM, clone_vm),
2066 #undef MAP
2067         };
2068         struct drm_i915_gem_context_create_ext_clone local;
2069         const struct create_ext *arg = data;
2070         struct i915_gem_context *dst = arg->ctx;
2071         struct i915_gem_context *src;
2072         int err, bit;
2073
2074         if (copy_from_user(&local, ext, sizeof(local)))
2075                 return -EFAULT;
2076
2077         BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2078                      I915_CONTEXT_CLONE_UNKNOWN);
2079
2080         if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2081                 return -EINVAL;
2082
2083         if (local.rsvd)
2084                 return -EINVAL;
2085
2086         rcu_read_lock();
2087         src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2088         rcu_read_unlock();
2089         if (!src)
2090                 return -ENOENT;
2091
2092         GEM_BUG_ON(src == dst);
2093
2094         for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2095                 if (!(local.flags & BIT(bit)))
2096                         continue;
2097
2098                 err = fn[bit](dst, src);
2099                 if (err)
2100                         return err;
2101         }
2102
2103         return 0;
2104 }
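/*
 * Illustrative userspace sketch (not part of this file; "fd" and
 * "parent_id" are assumed): cloning the engine map and VM from an
 * existing context at creation time.
 *
 *	struct drm_i915_gem_context_create_ext_clone clone = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
 *		.clone_id = parent_id,
 *		.flags = I915_CONTEXT_CLONE_ENGINES | I915_CONTEXT_CLONE_VM,
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&clone,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *
 * On success, create.ctx_id names the new context.
 */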
2105
2106 static const i915_user_extension_fn create_extensions[] = {
2107         [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2108         [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2109 };
2110
2111 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2112 {
2113         return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2114 }
2115
2116 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2117                                   struct drm_file *file)
2118 {
2119         struct drm_i915_private *i915 = to_i915(dev);
2120         struct drm_i915_gem_context_create_ext *args = data;
2121         struct create_ext ext_data;
2122         int ret;
2123
2124         if (!DRIVER_CAPS(i915)->has_logical_contexts)
2125                 return -ENODEV;
2126
2127         if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2128                 return -EINVAL;
2129
2130         ret = i915_terminally_wedged(i915);
2131         if (ret)
2132                 return ret;
2133
2134         ext_data.fpriv = file->driver_priv;
2135         if (client_is_banned(ext_data.fpriv)) {
2136                 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2137                           current->comm,
2138                           pid_nr(get_task_pid(current, PIDTYPE_PID)));
2139                 return -EIO;
2140         }
2141
2142         ret = i915_mutex_lock_interruptible(dev);
2143         if (ret)
2144                 return ret;
2145
2146         ext_data.ctx = i915_gem_create_context(i915, args->flags);
2147         mutex_unlock(&dev->struct_mutex);
2148         if (IS_ERR(ext_data.ctx))
2149                 return PTR_ERR(ext_data.ctx);
2150
2151         if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2152                 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2153                                            create_extensions,
2154                                            ARRAY_SIZE(create_extensions),
2155                                            &ext_data);
2156                 if (ret)
2157                         goto err_ctx;
2158         }
2159
2160         ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
2161         if (ret < 0)
2162                 goto err_ctx;
2163
2164         args->ctx_id = ret;
2165         DRM_DEBUG("HW context %d created\n", args->ctx_id);
2166
2167         return 0;
2168
2169 err_ctx:
2170         context_close(ext_data.ctx);
2171         return ret;
2172 }
2173
2174 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2175                                    struct drm_file *file)
2176 {
2177         struct drm_i915_gem_context_destroy *args = data;
2178         struct drm_i915_file_private *file_priv = file->driver_priv;
2179         struct i915_gem_context *ctx;
2180
2181         if (args->pad != 0)
2182                 return -EINVAL;
2183
2184         if (!args->ctx_id)
2185                 return -ENOENT;
2186
2187         if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2188                 return -EINTR;
2189
2190         ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2191         mutex_unlock(&file_priv->context_idr_lock);
2192         if (!ctx)
2193                 return -ENOENT;
2194
2195         context_close(ctx);
2196         return 0;
2197 }
2198
2199 static int get_sseu(struct i915_gem_context *ctx,
2200                     struct drm_i915_gem_context_param *args)
2201 {
2202         struct drm_i915_gem_context_param_sseu user_sseu;
2203         struct intel_context *ce;
2204         unsigned long lookup;
2205         int err;
2206
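        /* A zero size is a query for the size of the sseu parameter block */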
2207         if (args->size == 0)
2208                 goto out;
2209         else if (args->size < sizeof(user_sseu))
2210                 return -EINVAL;
2211
2212         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2213                            sizeof(user_sseu)))
2214                 return -EFAULT;
2215
2216         if (user_sseu.rsvd)
2217                 return -EINVAL;
2218
2219         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2220                 return -EINVAL;
2221
2222         lookup = 0;
2223         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2224                 lookup |= LOOKUP_USER_INDEX;
2225
2226         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2227         if (IS_ERR(ce))
2228                 return PTR_ERR(ce);
2229
2230         err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2231         if (err) {
2232                 intel_context_put(ce);
2233                 return err;
2234         }
2235
2236         user_sseu.slice_mask = ce->sseu.slice_mask;
2237         user_sseu.subslice_mask = ce->sseu.subslice_mask;
2238         user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2239         user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2240
2241         intel_context_unlock_pinned(ce);
2242         intel_context_put(ce);
2243
2244         if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2245                          sizeof(user_sseu)))
2246                 return -EFAULT;
2247
2248 out:
2249         args->size = sizeof(user_sseu);
2250
2251         return 0;
2252 }
2253
2254 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2255                                     struct drm_file *file)
2256 {
2257         struct drm_i915_file_private *file_priv = file->driver_priv;
2258         struct drm_i915_gem_context_param *args = data;
2259         struct i915_gem_context *ctx;
2260         int ret = 0;
2261
2262         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2263         if (!ctx)
2264                 return -ENOENT;
2265
2266         switch (args->param) {
2267         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2268                 args->size = 0;
2269                 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2270                 break;
2271
2272         case I915_CONTEXT_PARAM_GTT_SIZE:
2273                 args->size = 0;
2274                 if (ctx->vm)
2275                         args->value = ctx->vm->total;
2276                 else if (to_i915(dev)->mm.aliasing_ppgtt)
2277                         args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
2278                 else
2279                         args->value = to_i915(dev)->ggtt.vm.total;
2280                 break;
2281
2282         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2283                 args->size = 0;
2284                 args->value = i915_gem_context_no_error_capture(ctx);
2285                 break;
2286
2287         case I915_CONTEXT_PARAM_BANNABLE:
2288                 args->size = 0;
2289                 args->value = i915_gem_context_is_bannable(ctx);
2290                 break;
2291
2292         case I915_CONTEXT_PARAM_RECOVERABLE:
2293                 args->size = 0;
2294                 args->value = i915_gem_context_is_recoverable(ctx);
2295                 break;
2296
2297         case I915_CONTEXT_PARAM_PRIORITY:
2298                 args->size = 0;
2299                 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2300                 break;
2301
2302         case I915_CONTEXT_PARAM_SSEU:
2303                 ret = get_sseu(ctx, args);
2304                 break;
2305
2306         case I915_CONTEXT_PARAM_VM:
2307                 ret = get_ppgtt(file_priv, ctx, args);
2308                 break;
2309
2310         case I915_CONTEXT_PARAM_ENGINES:
2311                 ret = get_engines(ctx, args);
2312                 break;
2313
2314         case I915_CONTEXT_PARAM_BAN_PERIOD:
2315         default:
2316                 ret = -EINVAL;
2317                 break;
2318         }
2319
2320         i915_gem_context_put(ctx);
2321         return ret;
2322 }
2323
2324 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2325                                     struct drm_file *file)
2326 {
2327         struct drm_i915_file_private *file_priv = file->driver_priv;
2328         struct drm_i915_gem_context_param *args = data;
2329         struct i915_gem_context *ctx;
2330         int ret;
2331
2332         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2333         if (!ctx)
2334                 return -ENOENT;
2335
2336         ret = ctx_setparam(file_priv, ctx, args);
2337
2338         i915_gem_context_put(ctx);
2339         return ret;
2340 }
2341
2342 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2343                                        void *data, struct drm_file *file)
2344 {
2345         struct drm_i915_private *dev_priv = to_i915(dev);
2346         struct drm_i915_reset_stats *args = data;
2347         struct i915_gem_context *ctx;
2348         int ret;
2349
2350         if (args->flags || args->pad)
2351                 return -EINVAL;
2352
2353         ret = -ENOENT;
2354         rcu_read_lock();
2355         ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2356         if (!ctx)
2357                 goto out;
2358
2359         /*
2360          * We opt for unserialised reads here. This may result in tearing
2361          * in the extremely unlikely event of a GPU hang on this context
2362          * as we are querying them. If we need that extra layer of protection,
2363          * we should wrap the hangstats with a seqlock.
2364          */
2365
2366         if (capable(CAP_SYS_ADMIN))
2367                 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2368         else
2369                 args->reset_count = 0;
2370
2371         args->batch_active = atomic_read(&ctx->guilty_count);
2372         args->batch_pending = atomic_read(&ctx->active_count);
2373
2374         ret = 0;
2375 out:
2376         rcu_read_unlock();
2377         return ret;
2378 }
2379
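/*
 * Pin the context's global HW id: assign one lazily on first pin and keep
 * the context on the hw_id list so the id stays reserved while pinned.
 */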
2380 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2381 {
2382         struct drm_i915_private *i915 = ctx->i915;
2383         int err = 0;
2384
2385         mutex_lock(&i915->contexts.mutex);
2386
2387         GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2388
2389         if (list_empty(&ctx->hw_id_link)) {
2390                 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2391
2392                 err = assign_hw_id(i915, &ctx->hw_id);
2393                 if (err)
2394                         goto out_unlock;
2395
2396                 list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2397         }
2398
2399         GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2400         atomic_inc(&ctx->hw_id_pin_count);
2401
2402 out_unlock:
2403         mutex_unlock(&i915->contexts.mutex);
2404         return err;
2405 }
2406
2407 /* GEM context-engines iterator: for_each_gem_engine() */
2408 struct intel_context *
2409 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2410 {
2411         const struct i915_gem_engines *e = it->engines;
2412         struct intel_context *ctx;
2413
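        /* Skip over any holes (NULL slots) in the engine map */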
2414         do {
2415                 if (it->idx >= e->num_engines)
2416                         return NULL;
2417
2418                 ctx = e->engines[it->idx++];
2419         } while (!ctx);
2420
2421         return ctx;
2422 }
2423
2424 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2425 #include "selftests/mock_context.c"
2426 #include "selftests/i915_gem_context.c"
2427 #endif
2428
2429 static void i915_global_gem_context_shrink(void)
2430 {
2431         kmem_cache_shrink(global.slab_luts);
2432 }
2433
2434 static void i915_global_gem_context_exit(void)
2435 {
2436         kmem_cache_destroy(global.slab_luts);
2437 }
2438
2439 static struct i915_global_gem_context global = { {
2440         .shrink = i915_global_gem_context_shrink,
2441         .exit = i915_global_gem_context_exit,
2442 } };
2443
2444 int __init i915_global_gem_context_init(void)
2445 {
2446         global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2447         if (!global.slab_luts)
2448                 return -ENOMEM;
2449
2450         i915_global_register(&global.base);
2451         return 0;
2452 }