drm/i915: Drop the deferred active reference
[linux.git] drivers/gpu/drm/i915/gem/i915_gem_context.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and
11  * exits RC6 (the GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU some offset to load as the
21  * current context, which triggers a save of the context we actually care about.
22  * In fact, the code could likely be constructed, albeit in a more complicated
23  * fashion, to never use the default context, though that would limit the
24  * driver's ability to swap out and/or destroy other contexts.
25  *
26  * All other contexts are created at the request of a GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to avoid re-emitting state (and
28  * potentially to query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware and object
33  * tracking work. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: another client submits an execbuf with its own context
47  * S3->S1: context object was retired
48  * S3->S2: client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context" is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  *  possible for a context to be destroyed while it is still active.
64  *
65  */
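
/*
 * Illustrative sketch (not part of the driver code): the common
 * S0->S1->S2->S3->S1->S0 path from the table above, expressed as the
 * reference and pin counting it implies. The helper names here are
 * descriptive placeholders only, not the driver's actual entry points.
 *
 *	ctx = create_context();		S0->S1: refcount 1, pincount 0
 *	submit_execbuf(ctx);		S1->S2: refcount 2, pincount 1 (current)
 *	submit_execbuf(other_ctx);	S2->S3: pincount 0, still active on GPU
 *	retire_requests();		S3->S1: GPU reference dropped, refcount 1
 *	destroy_context(ctx);		S1->S0: final reference released
 */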
66
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69
70 #include <drm/i915_drm.h>
71
72 #include "gt/intel_lrc_reg.h"
73
74 #include "i915_gem_context.h"
75 #include "i915_globals.h"
76 #include "i915_trace.h"
77 #include "i915_user_extensions.h"
78
79 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
80
81 static struct i915_global_gem_context {
82         struct i915_global base;
83         struct kmem_cache *slab_luts;
84 } global;
85
86 struct i915_lut_handle *i915_lut_handle_alloc(void)
87 {
88         return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
89 }
90
91 void i915_lut_handle_free(struct i915_lut_handle *lut)
92 {
93         kmem_cache_free(global.slab_luts, lut);
94 }
95
96 static void lut_close(struct i915_gem_context *ctx)
97 {
98         struct i915_lut_handle *lut, *ln;
99         struct radix_tree_iter iter;
100         void __rcu **slot;
101
102         list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
103                 list_del(&lut->obj_link);
104                 i915_lut_handle_free(lut);
105         }
106         INIT_LIST_HEAD(&ctx->handles_list);
107
108         rcu_read_lock();
109         radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
110                 struct i915_vma *vma = rcu_dereference_raw(*slot);
111
112                 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
113
114                 vma->open_count--;
115                 i915_vma_put(vma);
116         }
117         rcu_read_unlock();
118 }
119
120 static struct intel_context *
121 lookup_user_engine(struct i915_gem_context *ctx,
122                    unsigned long flags,
123                    const struct i915_engine_class_instance *ci)
124 #define LOOKUP_USER_INDEX BIT(0)
125 {
126         int idx;
127
128         if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
129                 return ERR_PTR(-EINVAL);
130
131         if (!i915_gem_context_user_engines(ctx)) {
132                 struct intel_engine_cs *engine;
133
134                 engine = intel_engine_lookup_user(ctx->i915,
135                                                   ci->engine_class,
136                                                   ci->engine_instance);
137                 if (!engine)
138                         return ERR_PTR(-EINVAL);
139
140                 idx = engine->id;
141         } else {
142                 idx = ci->engine_instance;
143         }
144
145         return i915_gem_context_get_engine(ctx, idx);
146 }
147
148 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
149 {
150         unsigned int max;
151
152         lockdep_assert_held(&i915->contexts.mutex);
153
154         if (INTEL_GEN(i915) >= 11)
155                 max = GEN11_MAX_CONTEXT_HW_ID;
156         else if (USES_GUC_SUBMISSION(i915))
157                 /*
158                  * When using GuC in proxy submission, GuC consumes the
159                  * highest bit in the context id to indicate proxy submission.
160                  */
161                 max = MAX_GUC_CONTEXT_HW_ID;
162         else
163                 max = MAX_CONTEXT_HW_ID;
164
165         return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
166 }
167
168 static int steal_hw_id(struct drm_i915_private *i915)
169 {
170         struct i915_gem_context *ctx, *cn;
171         LIST_HEAD(pinned);
172         int id = -ENOSPC;
173
174         lockdep_assert_held(&i915->contexts.mutex);
175
176         list_for_each_entry_safe(ctx, cn,
177                                  &i915->contexts.hw_id_list, hw_id_link) {
178                 if (atomic_read(&ctx->hw_id_pin_count)) {
179                         list_move_tail(&ctx->hw_id_link, &pinned);
180                         continue;
181                 }
182
183                 GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
184                 list_del_init(&ctx->hw_id_link);
185                 id = ctx->hw_id;
186                 break;
187         }
188
189         /*
190          * Remember how far we got on the last repossession scan, so the
191          * list is kept in a "least recently scanned" order.
192          */
193         list_splice_tail(&pinned, &i915->contexts.hw_id_list);
194         return id;
195 }
196
197 static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
198 {
199         int ret;
200
201         lockdep_assert_held(&i915->contexts.mutex);
202
203         /*
204          * We prefer to steal/stall ourselves and our users over stalling the
205          * entire system. That may be a little unfair to our users, and may
206          * even hurt high priority clients. The choice is whether to oomkill
207          * something else, or to steal a context id.
208          */
209         ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
210         if (unlikely(ret < 0)) {
211                 ret = steal_hw_id(i915);
212                 if (ret < 0) /* once again for the correct errno code */
213                         ret = new_hw_id(i915, GFP_KERNEL);
214                 if (ret < 0)
215                         return ret;
216         }
217
218         *out = ret;
219         return 0;
220 }
221
222 static void release_hw_id(struct i915_gem_context *ctx)
223 {
224         struct drm_i915_private *i915 = ctx->i915;
225
226         if (list_empty(&ctx->hw_id_link))
227                 return;
228
229         mutex_lock(&i915->contexts.mutex);
230         if (!list_empty(&ctx->hw_id_link)) {
231                 ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
232                 list_del_init(&ctx->hw_id_link);
233         }
234         mutex_unlock(&i915->contexts.mutex);
235 }
236
237 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
238 {
239         while (count--) {
240                 if (!e->engines[count])
241                         continue;
242
243                 intel_context_put(e->engines[count]);
244         }
245         kfree(e);
246 }
247
248 static void free_engines(struct i915_gem_engines *e)
249 {
250         __free_engines(e, e->num_engines);
251 }
252
253 static void free_engines_rcu(struct work_struct *wrk)
254 {
255         struct i915_gem_engines *e =
256                 container_of(wrk, struct i915_gem_engines, rcu.work);
257         struct drm_i915_private *i915 = e->i915;
258
259         mutex_lock(&i915->drm.struct_mutex);
260         free_engines(e);
261         mutex_unlock(&i915->drm.struct_mutex);
262 }
263
264 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
265 {
266         struct intel_engine_cs *engine;
267         struct i915_gem_engines *e;
268         enum intel_engine_id id;
269
270         e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
271         if (!e)
272                 return ERR_PTR(-ENOMEM);
273
274         e->i915 = ctx->i915;
275         for_each_engine(engine, ctx->i915, id) {
276                 struct intel_context *ce;
277
278                 ce = intel_context_create(ctx, engine);
279                 if (IS_ERR(ce)) {
280                         __free_engines(e, id);
281                         return ERR_CAST(ce);
282                 }
283
284                 e->engines[id] = ce;
285         }
286         e->num_engines = id;
287
288         return e;
289 }
290
291 static void i915_gem_context_free(struct i915_gem_context *ctx)
292 {
293         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
294         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
295
296         release_hw_id(ctx);
297         i915_ppgtt_put(ctx->ppgtt);
298
299         free_engines(rcu_access_pointer(ctx->engines));
300         mutex_destroy(&ctx->engines_mutex);
301
302         if (ctx->timeline)
303                 i915_timeline_put(ctx->timeline);
304
305         kfree(ctx->name);
306         put_pid(ctx->pid);
307
308         list_del(&ctx->link);
309         mutex_destroy(&ctx->mutex);
310
311         kfree_rcu(ctx, rcu);
312 }
313
314 static void contexts_free(struct drm_i915_private *i915)
315 {
316         struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
317         struct i915_gem_context *ctx, *cn;
318
319         lockdep_assert_held(&i915->drm.struct_mutex);
320
321         llist_for_each_entry_safe(ctx, cn, freed, free_link)
322                 i915_gem_context_free(ctx);
323 }
324
325 static void contexts_free_first(struct drm_i915_private *i915)
326 {
327         struct i915_gem_context *ctx;
328         struct llist_node *freed;
329
330         lockdep_assert_held(&i915->drm.struct_mutex);
331
332         freed = llist_del_first(&i915->contexts.free_list);
333         if (!freed)
334                 return;
335
336         ctx = container_of(freed, typeof(*ctx), free_link);
337         i915_gem_context_free(ctx);
338 }
339
340 static void contexts_free_worker(struct work_struct *work)
341 {
342         struct drm_i915_private *i915 =
343                 container_of(work, typeof(*i915), contexts.free_work);
344
345         mutex_lock(&i915->drm.struct_mutex);
346         contexts_free(i915);
347         mutex_unlock(&i915->drm.struct_mutex);
348 }
349
350 void i915_gem_context_release(struct kref *ref)
351 {
352         struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
353         struct drm_i915_private *i915 = ctx->i915;
354
355         trace_i915_context_free(ctx);
356         if (llist_add(&ctx->free_link, &i915->contexts.free_list))
357                 queue_work(i915->wq, &i915->contexts.free_work);
358 }
359
360 static void context_close(struct i915_gem_context *ctx)
361 {
362         i915_gem_context_set_closed(ctx);
363
364         /*
365          * This context will never again be assigned to HW, so we can
366          * reuse its ID for the next context.
367          */
368         release_hw_id(ctx);
369
370         /*
371          * The LUT uses the VMA as a backpointer to unref the object,
372          * so we need to clear the LUT before we close all the VMA (inside
373          * the ppgtt).
374          */
375         lut_close(ctx);
376
377         ctx->file_priv = ERR_PTR(-EBADF);
378         i915_gem_context_put(ctx);
379 }
380
381 static u32 default_desc_template(const struct drm_i915_private *i915,
382                                  const struct i915_hw_ppgtt *ppgtt)
383 {
384         u32 address_mode;
385         u32 desc;
386
387         desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
388
389         address_mode = INTEL_LEGACY_32B_CONTEXT;
390         if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
391                 address_mode = INTEL_LEGACY_64B_CONTEXT;
392         desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
393
394         if (IS_GEN(i915, 8))
395                 desc |= GEN8_CTX_L3LLC_COHERENT;
396
397         /* TODO: WaDisableLiteRestore when we start using semaphore
398          * signalling between Command Streamers
399          * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
400          */
401
402         return desc;
403 }
404
405 static struct i915_gem_context *
406 __create_context(struct drm_i915_private *dev_priv)
407 {
408         struct i915_gem_context *ctx;
409         struct i915_gem_engines *e;
410         int err;
411         int i;
412
413         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
414         if (!ctx)
415                 return ERR_PTR(-ENOMEM);
416
417         kref_init(&ctx->ref);
418         list_add_tail(&ctx->link, &dev_priv->contexts.list);
419         ctx->i915 = dev_priv;
420         ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
421         mutex_init(&ctx->mutex);
422
423         mutex_init(&ctx->engines_mutex);
424         e = default_engines(ctx);
425         if (IS_ERR(e)) {
426                 err = PTR_ERR(e);
427                 goto err_free;
428         }
429         RCU_INIT_POINTER(ctx->engines, e);
430
431         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
432         INIT_LIST_HEAD(&ctx->handles_list);
433         INIT_LIST_HEAD(&ctx->hw_id_link);
434
435         /* NB: Mark all slices as needing a remap so that when the context first
436          * loads it will restore whatever remap state already exists. If there
437          * is no remap info, it will be a NOP. */
438         ctx->remap_slice = ALL_L3_SLICES(dev_priv);
439
440         i915_gem_context_set_bannable(ctx);
441         i915_gem_context_set_recoverable(ctx);
442
443         ctx->ring_size = 4 * PAGE_SIZE;
444         ctx->desc_template =
445                 default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
446
447         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
448                 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
449
450         return ctx;
451
452 err_free:
453         kfree(ctx);
454         return ERR_PTR(err);
455 }
456
457 static struct i915_hw_ppgtt *
458 __set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
459 {
460         struct i915_hw_ppgtt *old = ctx->ppgtt;
461
462         ctx->ppgtt = i915_ppgtt_get(ppgtt);
463         ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
464
465         return old;
466 }
467
468 static void __assign_ppgtt(struct i915_gem_context *ctx,
469                            struct i915_hw_ppgtt *ppgtt)
470 {
471         if (ppgtt == ctx->ppgtt)
472                 return;
473
474         ppgtt = __set_ppgtt(ctx, ppgtt);
475         if (ppgtt)
476                 i915_ppgtt_put(ppgtt);
477 }
478
479 static struct i915_gem_context *
480 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
481 {
482         struct i915_gem_context *ctx;
483
484         lockdep_assert_held(&dev_priv->drm.struct_mutex);
485
486         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
487             !HAS_EXECLISTS(dev_priv))
488                 return ERR_PTR(-EINVAL);
489
490         /* Reap the most stale context */
491         contexts_free_first(dev_priv);
492
493         ctx = __create_context(dev_priv);
494         if (IS_ERR(ctx))
495                 return ctx;
496
497         if (HAS_FULL_PPGTT(dev_priv)) {
498                 struct i915_hw_ppgtt *ppgtt;
499
500                 ppgtt = i915_ppgtt_create(dev_priv);
501                 if (IS_ERR(ppgtt)) {
502                         DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
503                                          PTR_ERR(ppgtt));
504                         context_close(ctx);
505                         return ERR_CAST(ppgtt);
506                 }
507
508                 __assign_ppgtt(ctx, ppgtt);
509                 i915_ppgtt_put(ppgtt);
510         }
511
512         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
513                 struct i915_timeline *timeline;
514
515                 timeline = i915_timeline_create(dev_priv, NULL);
516                 if (IS_ERR(timeline)) {
517                         context_close(ctx);
518                         return ERR_CAST(timeline);
519                 }
520
521                 ctx->timeline = timeline;
522         }
523
524         trace_i915_context_create(ctx);
525
526         return ctx;
527 }
528
529 /**
530  * i915_gem_context_create_gvt - create a GVT GEM context
531  * @dev: drm device
532  *
533  * This function is used to create a GVT specific GEM context.
534  *
535  * Returns:
536  * pointer to i915_gem_context on success, error pointer on failure
537  *
538  */
539 struct i915_gem_context *
540 i915_gem_context_create_gvt(struct drm_device *dev)
541 {
542         struct i915_gem_context *ctx;
543         int ret;
544
545         if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
546                 return ERR_PTR(-ENODEV);
547
548         ret = i915_mutex_lock_interruptible(dev);
549         if (ret)
550                 return ERR_PTR(ret);
551
552         ctx = i915_gem_create_context(to_i915(dev), 0);
553         if (IS_ERR(ctx))
554                 goto out;
555
556         ret = i915_gem_context_pin_hw_id(ctx);
557         if (ret) {
558                 context_close(ctx);
559                 ctx = ERR_PTR(ret);
560                 goto out;
561         }
562
563         ctx->file_priv = ERR_PTR(-EBADF);
564         i915_gem_context_set_closed(ctx); /* not user accessible */
565         i915_gem_context_clear_bannable(ctx);
566         i915_gem_context_set_force_single_submission(ctx);
567         if (!USES_GUC_SUBMISSION(to_i915(dev)))
568                 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
569
570         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
571 out:
572         mutex_unlock(&dev->struct_mutex);
573         return ctx;
574 }
575
576 static void
577 destroy_kernel_context(struct i915_gem_context **ctxp)
578 {
579         struct i915_gem_context *ctx;
580
581         /* Keep the context ref so that we can free it immediately ourselves */
582         ctx = i915_gem_context_get(fetch_and_zero(ctxp));
583         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
584
585         context_close(ctx);
586         i915_gem_context_free(ctx);
587 }
588
589 struct i915_gem_context *
590 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
591 {
592         struct i915_gem_context *ctx;
593         int err;
594
595         ctx = i915_gem_create_context(i915, 0);
596         if (IS_ERR(ctx))
597                 return ctx;
598
599         err = i915_gem_context_pin_hw_id(ctx);
600         if (err) {
601                 destroy_kernel_context(&ctx);
602                 return ERR_PTR(err);
603         }
604
605         i915_gem_context_clear_bannable(ctx);
606         ctx->sched.priority = I915_USER_PRIORITY(prio);
607         ctx->ring_size = PAGE_SIZE;
608
609         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
610
611         return ctx;
612 }
613
614 static void init_contexts(struct drm_i915_private *i915)
615 {
616         mutex_init(&i915->contexts.mutex);
617         INIT_LIST_HEAD(&i915->contexts.list);
618
619         /* Using the simple ida interface, the max is limited by sizeof(int) */
620         BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
621         BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
622         ida_init(&i915->contexts.hw_ida);
623         INIT_LIST_HEAD(&i915->contexts.hw_id_list);
624
625         INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
626         init_llist_head(&i915->contexts.free_list);
627 }
628
629 static bool needs_preempt_context(struct drm_i915_private *i915)
630 {
631         return HAS_EXECLISTS(i915);
632 }
633
634 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
635 {
636         struct i915_gem_context *ctx;
637
638         /* Reassure ourselves we are only called once */
639         GEM_BUG_ON(dev_priv->kernel_context);
640         GEM_BUG_ON(dev_priv->preempt_context);
641
642         intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
643         init_contexts(dev_priv);
644
645         /* lowest priority; idle task */
646         ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
647         if (IS_ERR(ctx)) {
648                 DRM_ERROR("Failed to create default global context\n");
649                 return PTR_ERR(ctx);
650         }
651         /*
652          * For easy recognisability, we want the kernel context to have hw_id 0, so
653          * that all user contexts will have a non-zero hw_id. Kernel contexts are
654          * permanently pinned, so that we never suffer a stall and can
655          * use them from any allocation context (e.g. for evicting other
656          * contexts and from inside the shrinker).
657          */
658         GEM_BUG_ON(ctx->hw_id);
659         GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
660         dev_priv->kernel_context = ctx;
661
662         /* highest priority; preempting task */
663         if (needs_preempt_context(dev_priv)) {
664                 ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
665                 if (!IS_ERR(ctx))
666                         dev_priv->preempt_context = ctx;
667                 else
668                         DRM_ERROR("Failed to create preempt context; disabling preemption\n");
669         }
670
671         DRM_DEBUG_DRIVER("%s context support initialized\n",
672                          DRIVER_CAPS(dev_priv)->has_logical_contexts ?
673                          "logical" : "fake");
674         return 0;
675 }
676
677 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
678 {
679         struct intel_engine_cs *engine;
680         enum intel_engine_id id;
681
682         lockdep_assert_held(&dev_priv->drm.struct_mutex);
683
684         for_each_engine(engine, dev_priv, id)
685                 intel_engine_lost_context(engine);
686 }
687
688 void i915_gem_contexts_fini(struct drm_i915_private *i915)
689 {
690         lockdep_assert_held(&i915->drm.struct_mutex);
691
692         if (i915->preempt_context)
693                 destroy_kernel_context(&i915->preempt_context);
694         destroy_kernel_context(&i915->kernel_context);
695
696         /* Must free all deferred contexts (via flush_workqueue) first */
697         GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
698         ida_destroy(&i915->contexts.hw_ida);
699 }
700
701 static int context_idr_cleanup(int id, void *p, void *data)
702 {
703         context_close(p);
704         return 0;
705 }
706
707 static int vm_idr_cleanup(int id, void *p, void *data)
708 {
709         i915_ppgtt_put(p);
710         return 0;
711 }
712
713 static int gem_context_register(struct i915_gem_context *ctx,
714                                 struct drm_i915_file_private *fpriv)
715 {
716         int ret;
717
718         ctx->file_priv = fpriv;
719         if (ctx->ppgtt)
720                 ctx->ppgtt->vm.file = fpriv;
721
722         ctx->pid = get_task_pid(current, PIDTYPE_PID);
723         ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
724                               current->comm, pid_nr(ctx->pid));
725         if (!ctx->name) {
726                 ret = -ENOMEM;
727                 goto err_pid;
728         }
729
730         /* And finally expose ourselves to userspace via the idr */
731         mutex_lock(&fpriv->context_idr_lock);
732         ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
733         mutex_unlock(&fpriv->context_idr_lock);
734         if (ret >= 0)
735                 goto out;
736
737         kfree(fetch_and_zero(&ctx->name));
738 err_pid:
739         put_pid(fetch_and_zero(&ctx->pid));
740 out:
741         return ret;
742 }
743
744 int i915_gem_context_open(struct drm_i915_private *i915,
745                           struct drm_file *file)
746 {
747         struct drm_i915_file_private *file_priv = file->driver_priv;
748         struct i915_gem_context *ctx;
749         int err;
750
751         mutex_init(&file_priv->context_idr_lock);
752         mutex_init(&file_priv->vm_idr_lock);
753
754         idr_init(&file_priv->context_idr);
755         idr_init_base(&file_priv->vm_idr, 1);
756
757         mutex_lock(&i915->drm.struct_mutex);
758         ctx = i915_gem_create_context(i915, 0);
759         mutex_unlock(&i915->drm.struct_mutex);
760         if (IS_ERR(ctx)) {
761                 err = PTR_ERR(ctx);
762                 goto err;
763         }
764
765         err = gem_context_register(ctx, file_priv);
766         if (err < 0)
767                 goto err_ctx;
768
769         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
770         GEM_BUG_ON(err > 0);
771
772         return 0;
773
774 err_ctx:
775         mutex_lock(&i915->drm.struct_mutex);
776         context_close(ctx);
777         mutex_unlock(&i915->drm.struct_mutex);
778 err:
779         idr_destroy(&file_priv->vm_idr);
780         idr_destroy(&file_priv->context_idr);
781         mutex_destroy(&file_priv->vm_idr_lock);
782         mutex_destroy(&file_priv->context_idr_lock);
783         return err;
784 }
785
786 void i915_gem_context_close(struct drm_file *file)
787 {
788         struct drm_i915_file_private *file_priv = file->driver_priv;
789
790         lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
791
792         idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
793         idr_destroy(&file_priv->context_idr);
794         mutex_destroy(&file_priv->context_idr_lock);
795
796         idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
797         idr_destroy(&file_priv->vm_idr);
798         mutex_destroy(&file_priv->vm_idr_lock);
799 }
800
801 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
802                              struct drm_file *file)
803 {
804         struct drm_i915_private *i915 = to_i915(dev);
805         struct drm_i915_gem_vm_control *args = data;
806         struct drm_i915_file_private *file_priv = file->driver_priv;
807         struct i915_hw_ppgtt *ppgtt;
808         int err;
809
810         if (!HAS_FULL_PPGTT(i915))
811                 return -ENODEV;
812
813         if (args->flags)
814                 return -EINVAL;
815
816         ppgtt = i915_ppgtt_create(i915);
817         if (IS_ERR(ppgtt))
818                 return PTR_ERR(ppgtt);
819
820         ppgtt->vm.file = file_priv;
821
822         if (args->extensions) {
823                 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
824                                            NULL, 0,
825                                            ppgtt);
826                 if (err)
827                         goto err_put;
828         }
829
830         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
831         if (err)
832                 goto err_put;
833
834         err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
835         if (err < 0)
836                 goto err_unlock;
837
838         GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
839
840         mutex_unlock(&file_priv->vm_idr_lock);
841
842         args->vm_id = err;
843         return 0;
844
845 err_unlock:
846         mutex_unlock(&file_priv->vm_idr_lock);
847 err_put:
848         i915_ppgtt_put(ppgtt);
849         return err;
850 }
851
852 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
853                               struct drm_file *file)
854 {
855         struct drm_i915_file_private *file_priv = file->driver_priv;
856         struct drm_i915_gem_vm_control *args = data;
857         struct i915_hw_ppgtt *ppgtt;
858         int err;
859         u32 id;
860
861         if (args->flags)
862                 return -EINVAL;
863
864         if (args->extensions)
865                 return -EINVAL;
866
867         id = args->vm_id;
868         if (!id)
869                 return -ENOENT;
870
871         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
872         if (err)
873                 return err;
874
875         ppgtt = idr_remove(&file_priv->vm_idr, id);
876
877         mutex_unlock(&file_priv->vm_idr_lock);
878         if (!ppgtt)
879                 return -ENOENT;
880
881         i915_ppgtt_put(ppgtt);
882         return 0;
883 }
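
/*
 * Minimal userspace sketch of the two VM ioctls handled above; it assumes
 * "fd" is a DRM fd opened on an i915 device and omits headers and error
 * handling. The struct and ioctl names follow the uAPI in i915_drm.h.
 *
 *	struct drm_i915_gem_vm_control ctl = { };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl);
 *	... ctl.vm_id now names a private ppGTT (never 0) ...
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
 */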
884
885 struct context_barrier_task {
886         struct i915_active base;
887         void (*task)(void *data);
888         void *data;
889 };
890
891 static void cb_retire(struct i915_active *base)
892 {
893         struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
894
895         if (cb->task)
896                 cb->task(cb->data);
897
898         i915_active_fini(&cb->base);
899         kfree(cb);
900 }
901
902 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
903 static int context_barrier_task(struct i915_gem_context *ctx,
904                                 intel_engine_mask_t engines,
905                                 int (*emit)(struct i915_request *rq, void *data),
906                                 void (*task)(void *data),
907                                 void *data)
908 {
909         struct drm_i915_private *i915 = ctx->i915;
910         struct context_barrier_task *cb;
911         struct i915_gem_engines_iter it;
912         struct intel_context *ce;
913         int err = 0;
914
915         lockdep_assert_held(&i915->drm.struct_mutex);
916         GEM_BUG_ON(!task);
917
918         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
919         if (!cb)
920                 return -ENOMEM;
921
922         i915_active_init(i915, &cb->base, cb_retire);
923         i915_active_acquire(&cb->base);
924
925         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
926                 struct i915_request *rq;
927
928                 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
929                                        ce->engine->mask)) {
930                         err = -ENXIO;
931                         break;
932                 }
933
934                 if (!(ce->engine->mask & engines) || !ce->state)
935                         continue;
936
937                 rq = intel_context_create_request(ce);
938                 if (IS_ERR(rq)) {
939                         err = PTR_ERR(rq);
940                         break;
941                 }
942
943                 err = 0;
944                 if (emit)
945                         err = emit(rq, data);
946                 if (err == 0)
947                         err = i915_active_ref(&cb->base, rq->fence.context, rq);
948
949                 i915_request_add(rq);
950                 if (err)
951                         break;
952         }
953         i915_gem_context_unlock_engines(ctx);
954
955         cb->task = err ? NULL : task; /* caller needs to unwind instead */
956         cb->data = data;
957
958         i915_active_release(&cb->base);
959
960         return err;
961 }
962
963 static int get_ppgtt(struct drm_i915_file_private *file_priv,
964                      struct i915_gem_context *ctx,
965                      struct drm_i915_gem_context_param *args)
966 {
967         struct i915_hw_ppgtt *ppgtt;
968         int ret;
969
970         if (!ctx->ppgtt)
971                 return -ENODEV;
972
973         /* XXX rcu acquire? */
974         ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
975         if (ret)
976                 return ret;
977
978         ppgtt = i915_ppgtt_get(ctx->ppgtt);
979         mutex_unlock(&ctx->i915->drm.struct_mutex);
980
981         ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
982         if (ret)
983                 goto err_put;
984
985         ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
986         GEM_BUG_ON(!ret);
987         if (ret < 0)
988                 goto err_unlock;
989
990         i915_ppgtt_get(ppgtt);
991
992         args->size = 0;
993         args->value = ret;
994
995         ret = 0;
996 err_unlock:
997         mutex_unlock(&file_priv->vm_idr_lock);
998 err_put:
999         i915_ppgtt_put(ppgtt);
1000         return ret;
1001 }
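
/*
 * Userspace sketch: get_ppgtt() above services GETPARAM(I915_CONTEXT_PARAM_VM).
 * Each successful call exports the context's ppGTT under a fresh vm_id owned
 * by the caller, which should eventually be released with VM_DESTROY.
 * "fd" and "ctx_id" are assumed to come from earlier ioctls; error handling
 * is elided.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_VM,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	vm_id = p.value;
 */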
1002
1003 static void set_ppgtt_barrier(void *data)
1004 {
1005         struct i915_hw_ppgtt *old = data;
1006
1007         if (INTEL_GEN(old->vm.i915) < 8)
1008                 gen6_ppgtt_unpin_all(old);
1009
1010         i915_ppgtt_put(old);
1011 }
1012
1013 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1014 {
1015         struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
1016         struct intel_engine_cs *engine = rq->engine;
1017         u32 base = engine->mmio_base;
1018         u32 *cs;
1019         int i;
1020
1021         if (i915_vm_is_4lvl(&ppgtt->vm)) {
1022                 const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
1023
1024                 cs = intel_ring_begin(rq, 6);
1025                 if (IS_ERR(cs))
1026                         return PTR_ERR(cs);
1027
1028                 *cs++ = MI_LOAD_REGISTER_IMM(2);
1029
1030                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1031                 *cs++ = upper_32_bits(pd_daddr);
1032                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1033                 *cs++ = lower_32_bits(pd_daddr);
1034
1035                 *cs++ = MI_NOOP;
1036                 intel_ring_advance(rq, cs);
1037         } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1038                 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1039                 if (IS_ERR(cs))
1040                         return PTR_ERR(cs);
1041
1042                 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1043                 for (i = GEN8_3LVL_PDPES; i--; ) {
1044                         const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1045
1046                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1047                         *cs++ = upper_32_bits(pd_daddr);
1048                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1049                         *cs++ = lower_32_bits(pd_daddr);
1050                 }
1051                 *cs++ = MI_NOOP;
1052                 intel_ring_advance(rq, cs);
1053         } else {
1054                 /* ppGTT is not part of the legacy context image */
1055                 gen6_ppgtt_pin(ppgtt);
1056         }
1057
1058         return 0;
1059 }
1060
1061 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1062                      struct i915_gem_context *ctx,
1063                      struct drm_i915_gem_context_param *args)
1064 {
1065         struct i915_hw_ppgtt *ppgtt, *old;
1066         int err;
1067
1068         if (args->size)
1069                 return -EINVAL;
1070
1071         if (!ctx->ppgtt)
1072                 return -ENODEV;
1073
1074         if (upper_32_bits(args->value))
1075                 return -ENOENT;
1076
1077         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1078         if (err)
1079                 return err;
1080
1081         ppgtt = idr_find(&file_priv->vm_idr, args->value);
1082         if (ppgtt)
1083                 i915_ppgtt_get(ppgtt);
1084         mutex_unlock(&file_priv->vm_idr_lock);
1085         if (!ppgtt)
1086                 return -ENOENT;
1087
1088         err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1089         if (err)
1090                 goto out;
1091
1092         if (ppgtt == ctx->ppgtt)
1093                 goto unlock;
1094
1095         /* Tear down the existing obj:vma cache; it will have to be rebuilt. */
1096         lut_close(ctx);
1097
1098         old = __set_ppgtt(ctx, ppgtt);
1099
1100         /*
1101          * We need to flush any requests using the current ppgtt before
1102          * we release it as the requests do not hold a reference themselves,
1103          * only indirectly through the context.
1104          */
1105         err = context_barrier_task(ctx, ALL_ENGINES,
1106                                    emit_ppgtt_update,
1107                                    set_ppgtt_barrier,
1108                                    old);
1109         if (err) {
1110                 ctx->ppgtt = old;
1111                 ctx->desc_template = default_desc_template(ctx->i915, old);
1112                 i915_ppgtt_put(ppgtt);
1113         }
1114
1115 unlock:
1116         mutex_unlock(&ctx->i915->drm.struct_mutex);
1117
1118 out:
1119         i915_ppgtt_put(ppgtt);
1120         return err;
1121 }
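
/*
 * Userspace sketch pairing VM_CREATE with set_ppgtt() above: bind a freshly
 * created VM to an existing context through SETPARAM(I915_CONTEXT_PARAM_VM).
 * "fd", "ctx_id" and "vm_id" are assumed to come from earlier ioctls.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_VM,
 *		.value = vm_id,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */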
1122
1123 static int gen8_emit_rpcs_config(struct i915_request *rq,
1124                                  struct intel_context *ce,
1125                                  struct intel_sseu sseu)
1126 {
1127         u64 offset;
1128         u32 *cs;
1129
1130         cs = intel_ring_begin(rq, 4);
1131         if (IS_ERR(cs))
1132                 return PTR_ERR(cs);
1133
1134         offset = i915_ggtt_offset(ce->state) +
1135                  LRC_STATE_PN * PAGE_SIZE +
1136                  (CTX_R_PWR_CLK_STATE + 1) * 4;
1137
1138         *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1139         *cs++ = lower_32_bits(offset);
1140         *cs++ = upper_32_bits(offset);
1141         *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1142
1143         intel_ring_advance(rq, cs);
1144
1145         return 0;
1146 }
1147
1148 static int
1149 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1150 {
1151         struct i915_request *rq;
1152         int ret;
1153
1154         lockdep_assert_held(&ce->pin_mutex);
1155
1156         /*
1157          * If the context is not idle, we have to submit an ordered request to
1158          * modify its context image via the kernel context (writing to our own
1159          * image, or into the registers directly, does not stick). Pristine
1160          * and idle contexts will be configured on pinning.
1161          */
1162         if (!intel_context_is_pinned(ce))
1163                 return 0;
1164
1165         rq = i915_request_create(ce->engine->kernel_context);
1166         if (IS_ERR(rq))
1167                 return PTR_ERR(rq);
1168
1169         /* Queue this switch after all other activity by this context. */
1170         ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
1171         if (ret)
1172                 goto out_add;
1173
1174         ret = gen8_emit_rpcs_config(rq, ce, sseu);
1175         if (ret)
1176                 goto out_add;
1177
1178         /*
1179          * Guarantee that the context image and the timeline remain pinned until
1180          * the modifying request is retired, by setting the ce activity tracker.
1181          *
1182          * But we only need to take one pin on account of it. Or in other
1183          * words, transfer the pinned ce object to the tracked active request.
1184          */
1185         if (!i915_active_request_isset(&ce->active_tracker))
1186                 __intel_context_pin(ce);
1187         __i915_active_request_set(&ce->active_tracker, rq);
1188
1189 out_add:
1190         i915_request_add(rq);
1191         return ret;
1192 }
1193
1194 static int
1195 __intel_context_reconfigure_sseu(struct intel_context *ce,
1196                                  struct intel_sseu sseu)
1197 {
1198         int ret;
1199
1200         GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
1201
1202         ret = intel_context_lock_pinned(ce);
1203         if (ret)
1204                 return ret;
1205
1206         /* Nothing to do if unmodified. */
1207         if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1208                 goto unlock;
1209
1210         ret = gen8_modify_rpcs(ce, sseu);
1211         if (!ret)
1212                 ce->sseu = sseu;
1213
1214 unlock:
1215         intel_context_unlock_pinned(ce);
1216         return ret;
1217 }
1218
1219 static int
1220 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1221 {
1222         struct drm_i915_private *i915 = ce->gem_context->i915;
1223         int ret;
1224
1225         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1226         if (ret)
1227                 return ret;
1228
1229         ret = __intel_context_reconfigure_sseu(ce, sseu);
1230
1231         mutex_unlock(&i915->drm.struct_mutex);
1232
1233         return ret;
1234 }
1235
1236 static int
1237 user_to_context_sseu(struct drm_i915_private *i915,
1238                      const struct drm_i915_gem_context_param_sseu *user,
1239                      struct intel_sseu *context)
1240 {
1241         const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1242
1243         /* No zeros in any field. */
1244         if (!user->slice_mask || !user->subslice_mask ||
1245             !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1246                 return -EINVAL;
1247
1248         /* Max >= min. */
1249         if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1250                 return -EINVAL;
1251
1252         /*
1253          * Some future proofing on the types since the uAPI is wider than the
1254          * current internal implementation.
1255          */
1256         if (overflows_type(user->slice_mask, context->slice_mask) ||
1257             overflows_type(user->subslice_mask, context->subslice_mask) ||
1258             overflows_type(user->min_eus_per_subslice,
1259                            context->min_eus_per_subslice) ||
1260             overflows_type(user->max_eus_per_subslice,
1261                            context->max_eus_per_subslice))
1262                 return -EINVAL;
1263
1264         /* Check validity against hardware. */
1265         if (user->slice_mask & ~device->slice_mask)
1266                 return -EINVAL;
1267
1268         if (user->subslice_mask & ~device->subslice_mask[0])
1269                 return -EINVAL;
1270
1271         if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1272                 return -EINVAL;
1273
1274         context->slice_mask = user->slice_mask;
1275         context->subslice_mask = user->subslice_mask;
1276         context->min_eus_per_subslice = user->min_eus_per_subslice;
1277         context->max_eus_per_subslice = user->max_eus_per_subslice;
1278
1279         /* Part specific restrictions. */
1280         if (IS_GEN(i915, 11)) {
1281                 unsigned int hw_s = hweight8(device->slice_mask);
1282                 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1283                 unsigned int req_s = hweight8(context->slice_mask);
1284                 unsigned int req_ss = hweight8(context->subslice_mask);
1285
1286                 /*
1287                  * Only full subslice enablement is possible if more than one
1288                  * slice is turned on.
1289                  */
1290                 if (req_s > 1 && req_ss != hw_ss_per_s)
1291                         return -EINVAL;
1292
1293                 /*
1294                  * If more than four (SScount bitfield limit) subslices are
1295                  * requested then the number has to be even.
1296                  */
1297                 if (req_ss > 4 && (req_ss & 1))
1298                         return -EINVAL;
1299
1300                 /*
1301                  * If only one slice is enabled and subslice count is below the
1302                  * device full enablement, it must be at most half of all the
1303                  * available subslices.
1304                  */
1305                 if (req_s == 1 && req_ss < hw_ss_per_s &&
1306                     req_ss > (hw_ss_per_s / 2))
1307                         return -EINVAL;
1308
1309                 /* ABI restriction - VME use case only. */
1310
1311                 /* All slices or one slice only. */
1312                 if (req_s != 1 && req_s != hw_s)
1313                         return -EINVAL;
1314
1315                 /*
1316                  * Half subslices or full enablement only when one slice is
1317                  * enabled.
1318                  */
1319                 if (req_s == 1 &&
1320                     (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1321                         return -EINVAL;
1322
1323                 /* No EU configuration changes. */
1324                 if ((user->min_eus_per_subslice !=
1325                      device->max_eus_per_subslice) ||
1326                     (user->max_eus_per_subslice !=
1327                      device->max_eus_per_subslice))
1328                         return -EINVAL;
1329         }
1330
1331         return 0;
1332 }
1333
1334 static int set_sseu(struct i915_gem_context *ctx,
1335                     struct drm_i915_gem_context_param *args)
1336 {
1337         struct drm_i915_private *i915 = ctx->i915;
1338         struct drm_i915_gem_context_param_sseu user_sseu;
1339         struct intel_context *ce;
1340         struct intel_sseu sseu;
1341         unsigned long lookup;
1342         int ret;
1343
1344         if (args->size < sizeof(user_sseu))
1345                 return -EINVAL;
1346
1347         if (!IS_GEN(i915, 11))
1348                 return -ENODEV;
1349
1350         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1351                            sizeof(user_sseu)))
1352                 return -EFAULT;
1353
1354         if (user_sseu.rsvd)
1355                 return -EINVAL;
1356
1357         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1358                 return -EINVAL;
1359
1360         lookup = 0;
1361         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1362                 lookup |= LOOKUP_USER_INDEX;
1363
1364         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1365         if (IS_ERR(ce))
1366                 return PTR_ERR(ce);
1367
1368         /* Only the render engine supports RPCS configuration. */
1369         if (ce->engine->class != RENDER_CLASS) {
1370                 ret = -ENODEV;
1371                 goto out_ce;
1372         }
1373
1374         ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1375         if (ret)
1376                 goto out_ce;
1377
1378         ret = intel_context_reconfigure_sseu(ce, sseu);
1379         if (ret)
1380                 goto out_ce;
1381
1382         args->size = sizeof(user_sseu);
1383
1384 out_ce:
1385         intel_context_put(ce);
1386         return ret;
1387 }
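
/*
 * Userspace sketch of driving set_sseu() above on the render engine of a
 * gen11 part. The masks and EU counts are illustrative only and must satisfy
 * both the hardware and the restrictions in user_to_context_sseu();
 * "fd" and "ctx_id" are assumed, error handling is elided.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xf,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */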
1388
1389 struct set_engines {
1390         struct i915_gem_context *ctx;
1391         struct i915_gem_engines *engines;
1392 };
1393
1394 static int
1395 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1396 {
1397         struct i915_context_engines_load_balance __user *ext =
1398                 container_of_user(base, typeof(*ext), base);
1399         const struct set_engines *set = data;
1400         struct intel_engine_cs *stack[16];
1401         struct intel_engine_cs **siblings;
1402         struct intel_context *ce;
1403         u16 num_siblings, idx;
1404         unsigned int n;
1405         int err;
1406
1407         if (!HAS_EXECLISTS(set->ctx->i915))
1408                 return -ENODEV;
1409
1410         if (USES_GUC_SUBMISSION(set->ctx->i915))
1411                 return -ENODEV; /* not implemented yet */
1412
1413         if (get_user(idx, &ext->engine_index))
1414                 return -EFAULT;
1415
1416         if (idx >= set->engines->num_engines) {
1417                 DRM_DEBUG("Invalid placement value, %d >= %d\n",
1418                           idx, set->engines->num_engines);
1419                 return -EINVAL;
1420         }
1421
1422         idx = array_index_nospec(idx, set->engines->num_engines);
1423         if (set->engines->engines[idx]) {
1424                 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1425                 return -EEXIST;
1426         }
1427
1428         if (get_user(num_siblings, &ext->num_siblings))
1429                 return -EFAULT;
1430
1431         err = check_user_mbz(&ext->flags);
1432         if (err)
1433                 return err;
1434
1435         err = check_user_mbz(&ext->mbz64);
1436         if (err)
1437                 return err;
1438
1439         siblings = stack;
1440         if (num_siblings > ARRAY_SIZE(stack)) {
1441                 siblings = kmalloc_array(num_siblings,
1442                                          sizeof(*siblings),
1443                                          GFP_KERNEL);
1444                 if (!siblings)
1445                         return -ENOMEM;
1446         }
1447
1448         for (n = 0; n < num_siblings; n++) {
1449                 struct i915_engine_class_instance ci;
1450
1451                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1452                         err = -EFAULT;
1453                         goto out_siblings;
1454                 }
1455
1456                 siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1457                                                        ci.engine_class,
1458                                                        ci.engine_instance);
1459                 if (!siblings[n]) {
1460                         DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1461                                   n, ci.engine_class, ci.engine_instance);
1462                         err = -EINVAL;
1463                         goto out_siblings;
1464                 }
1465         }
1466
1467         ce = intel_execlists_create_virtual(set->ctx, siblings, n);
1468         if (IS_ERR(ce)) {
1469                 err = PTR_ERR(ce);
1470                 goto out_siblings;
1471         }
1472
1473         if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1474                 intel_context_put(ce);
1475                 err = -EEXIST;
1476                 goto out_siblings;
1477         }
1478
1479 out_siblings:
1480         if (siblings != stack)
1481                 kfree(siblings);
1482
1483         return err;
1484 }
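
/*
 * Userspace sketch of the load-balance extension handled above: describe a
 * two-way virtual engine for slot 0 of an I915_CONTEXT_PARAM_ENGINES array.
 * The extension is chained through the engines parameter's extensions
 * pointer, and slot 0 of the base engine array must be left as the
 * INVALID/INVALID_NONE placeholder so the virtual engine can occupy it.
 * The sibling instances chosen here are illustrative.
 *
 *	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
 *		.base = { .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
 *		.engine_index = 0,
 *		.num_siblings = 2,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_VIDEO, 0 },
 *			{ I915_ENGINE_CLASS_VIDEO, 1 },
 *		},
 *	};
 */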
1485
1486 static int
1487 set_engines__bond(struct i915_user_extension __user *base, void *data)
1488 {
1489         struct i915_context_engines_bond __user *ext =
1490                 container_of_user(base, typeof(*ext), base);
1491         const struct set_engines *set = data;
1492         struct i915_engine_class_instance ci;
1493         struct intel_engine_cs *virtual;
1494         struct intel_engine_cs *master;
1495         u16 idx, num_bonds;
1496         int err, n;
1497
1498         if (get_user(idx, &ext->virtual_index))
1499                 return -EFAULT;
1500
1501         if (idx >= set->engines->num_engines) {
1502                 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1503                           idx, set->engines->num_engines);
1504                 return -EINVAL;
1505         }
1506
1507         idx = array_index_nospec(idx, set->engines->num_engines);
1508         if (!set->engines->engines[idx]) {
1509                 DRM_DEBUG("Invalid engine at %d\n", idx);
1510                 return -EINVAL;
1511         }
1512         virtual = set->engines->engines[idx]->engine;
1513
1514         err = check_user_mbz(&ext->flags);
1515         if (err)
1516                 return err;
1517
1518         for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1519                 err = check_user_mbz(&ext->mbz64[n]);
1520                 if (err)
1521                         return err;
1522         }
1523
1524         if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1525                 return -EFAULT;
1526
1527         master = intel_engine_lookup_user(set->ctx->i915,
1528                                           ci.engine_class, ci.engine_instance);
1529         if (!master) {
1530                 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1531                           ci.engine_class, ci.engine_instance);
1532                 return -EINVAL;
1533         }
1534
1535         if (get_user(num_bonds, &ext->num_bonds))
1536                 return -EFAULT;
1537
1538         for (n = 0; n < num_bonds; n++) {
1539                 struct intel_engine_cs *bond;
1540
1541                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1542                         return -EFAULT;
1543
1544                 bond = intel_engine_lookup_user(set->ctx->i915,
1545                                                 ci.engine_class,
1546                                                 ci.engine_instance);
1547                 if (!bond) {
1548                         DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1549                                   n, ci.engine_class, ci.engine_instance);
1550                         return -EINVAL;
1551                 }
1552
1553                 /*
1554                  * A non-virtual engine has no siblings to choose between, and
1555                  * a submit fence will always be directed to the one engine.
1556                  */
1557                 if (intel_engine_is_virtual(virtual)) {
1558                         err = intel_virtual_engine_attach_bond(virtual,
1559                                                                master,
1560                                                                bond);
1561                         if (err)
1562                                 return err;
1563                 }
1564         }
1565
1566         return 0;
1567 }
1568
1569 static const i915_user_extension_fn set_engines__extensions[] = {
1570         [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1571         [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1572 };
1573
1574 static int
1575 set_engines(struct i915_gem_context *ctx,
1576             const struct drm_i915_gem_context_param *args)
1577 {
1578         struct i915_context_param_engines __user *user =
1579                 u64_to_user_ptr(args->value);
1580         struct set_engines set = { .ctx = ctx };
1581         unsigned int num_engines, n;
1582         u64 extensions;
1583         int err;
1584
1585         if (!args->size) { /* switch back to legacy user_ring_map */
1586                 if (!i915_gem_context_user_engines(ctx))
1587                         return 0;
1588
1589                 set.engines = default_engines(ctx);
1590                 if (IS_ERR(set.engines))
1591                         return PTR_ERR(set.engines);
1592
1593                 goto replace;
1594         }
1595
1596         BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1597         if (args->size < sizeof(*user) ||
1598             !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1599                 DRM_DEBUG("Invalid size for engine array: %d\n",
1600                           args->size);
1601                 return -EINVAL;
1602         }
1603
1604         /*
1605          * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1606          * first 64 engines defined here.
1607          */
1608         num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1609
1610         set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1611                               GFP_KERNEL);
1612         if (!set.engines)
1613                 return -ENOMEM;
1614
1615         set.engines->i915 = ctx->i915;
1616         for (n = 0; n < num_engines; n++) {
1617                 struct i915_engine_class_instance ci;
1618                 struct intel_engine_cs *engine;
1619
1620                 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1621                         __free_engines(set.engines, n);
1622                         return -EFAULT;
1623                 }
1624
1625                 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1626                     ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1627                         set.engines->engines[n] = NULL;
1628                         continue;
1629                 }
1630
1631                 engine = intel_engine_lookup_user(ctx->i915,
1632                                                   ci.engine_class,
1633                                                   ci.engine_instance);
1634                 if (!engine) {
1635                         DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1636                                   n, ci.engine_class, ci.engine_instance);
1637                         __free_engines(set.engines, n);
1638                         return -ENOENT;
1639                 }
1640
1641                 set.engines->engines[n] = intel_context_create(ctx, engine);
1642                 if (IS_ERR_OR_NULL(set.engines->engines[n])) {
1643                         __free_engines(set.engines, n);
1644                         return -ENOMEM;
1645                 }
1646         }
1647         set.engines->num_engines = num_engines;
1648
1649         err = -EFAULT;
1650         if (!get_user(extensions, &user->extensions))
1651                 err = i915_user_extensions(u64_to_user_ptr(extensions),
1652                                            set_engines__extensions,
1653                                            ARRAY_SIZE(set_engines__extensions),
1654                                            &set);
1655         if (err) {
1656                 free_engines(set.engines);
1657                 return err;
1658         }
1659
1660 replace:
1661         mutex_lock(&ctx->engines_mutex);
1662         if (args->size)
1663                 i915_gem_context_set_user_engines(ctx);
1664         else
1665                 i915_gem_context_clear_user_engines(ctx);
1666         rcu_swap_protected(ctx->engines, set.engines, 1);
1667         mutex_unlock(&ctx->engines_mutex);
1668
1669         INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu);
1670         queue_rcu_work(system_wq, &set.engines->rcu);
1671
1672         return 0;
1673 }
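
/*
 * Illustrative userspace sketch (not part of this driver): installing a
 * two-slot engine map with I915_CONTEXT_PARAM_ENGINES.  The names are
 * assumed to match the corresponding include/uapi/drm/i915_drm.h; the
 * chosen classes/instances are placeholders and error handling is minimal.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_two_engines(int drm_fd, uint32_t ctx_id)
{
        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
                .extensions = 0,        /* no LOAD_BALANCE/BOND extensions chained */
                .engines = {
                        [0] = { .engine_class = I915_ENGINE_CLASS_RENDER,
                                .engine_instance = 0 },
                        [1] = { .engine_class = I915_ENGINE_CLASS_COPY,
                                .engine_instance = 0 },
                },
        };
        struct drm_i915_gem_context_param arg = {
                .ctx_id = ctx_id,
                .param = I915_CONTEXT_PARAM_ENGINES,
                .size = sizeof(engines),        /* size == 0 would revert to the legacy ring map */
                .value = (uintptr_t)&engines,
        };

        /* execbuf then selects these engines by index 0 and 1 */
        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg) ? -errno : 0;
}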
1674
1675 static struct i915_gem_engines *
1676 __copy_engines(struct i915_gem_engines *e)
1677 {
1678         struct i915_gem_engines *copy;
1679         unsigned int n;
1680
1681         copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1682         if (!copy)
1683                 return ERR_PTR(-ENOMEM);
1684
1685         copy->i915 = e->i915;
1686         for (n = 0; n < e->num_engines; n++) {
1687                 if (e->engines[n])
1688                         copy->engines[n] = intel_context_get(e->engines[n]);
1689                 else
1690                         copy->engines[n] = NULL;
1691         }
1692         copy->num_engines = n;
1693
1694         return copy;
1695 }
1696
1697 static int
1698 get_engines(struct i915_gem_context *ctx,
1699             struct drm_i915_gem_context_param *args)
1700 {
1701         struct i915_context_param_engines __user *user;
1702         struct i915_gem_engines *e;
1703         size_t n, count, size;
1704         int err = 0;
1705
1706         err = mutex_lock_interruptible(&ctx->engines_mutex);
1707         if (err)
1708                 return err;
1709
1710         e = NULL;
1711         if (i915_gem_context_user_engines(ctx))
1712                 e = __copy_engines(i915_gem_context_engines(ctx));
1713         mutex_unlock(&ctx->engines_mutex);
1714         if (IS_ERR_OR_NULL(e)) {
1715                 args->size = 0;
1716                 return PTR_ERR_OR_ZERO(e);
1717         }
1718
1719         count = e->num_engines;
1720
1721         /* Be paranoid in case we have an impedance mismatch */
1722         if (!check_struct_size(user, engines, count, &size)) {
1723                 err = -EINVAL;
1724                 goto err_free;
1725         }
1726         if (overflows_type(size, args->size)) {
1727                 err = -EINVAL;
1728                 goto err_free;
1729         }
1730
1731         if (!args->size) {
1732                 args->size = size;
1733                 goto err_free;
1734         }
1735
1736         if (args->size < size) {
1737                 err = -EINVAL;
1738                 goto err_free;
1739         }
1740
1741         user = u64_to_user_ptr(args->value);
1742         if (!access_ok(user, size)) {
1743                 err = -EFAULT;
1744                 goto err_free;
1745         }
1746
1747         if (put_user(0, &user->extensions)) {
1748                 err = -EFAULT;
1749                 goto err_free;
1750         }
1751
1752         for (n = 0; n < count; n++) {
1753                 struct i915_engine_class_instance ci = {
1754                         .engine_class = I915_ENGINE_CLASS_INVALID,
1755                         .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1756                 };
1757
1758                 if (e->engines[n]) {
1759                         ci.engine_class = e->engines[n]->engine->uabi_class;
1760                         ci.engine_instance = e->engines[n]->engine->instance;
1761                 }
1762
1763                 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1764                         err = -EFAULT;
1765                         goto err_free;
1766                 }
1767         }
1768
1769         args->size = size;
1770
1771 err_free:
1772         INIT_RCU_WORK(&e->rcu, free_engines_rcu);
1773         queue_rcu_work(system_wq, &e->rcu);
1774         return err;
1775 }
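
/*
 * Illustrative userspace sketch (not part of this driver) of the two-step
 * query implemented above: a first GETPARAM with size == 0 reports the
 * required buffer size, a second call fills it in.  uAPI names are assumed
 * to match include/uapi/drm/i915_drm.h.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int print_engine_map(int drm_fd, uint32_t ctx_id)
{
        struct drm_i915_gem_context_param arg = {
                .ctx_id = ctx_id,
                .param = I915_CONTEXT_PARAM_ENGINES,
                .size = 0,      /* first pass: ask for the required size */
        };
        struct i915_context_param_engines *engines;
        unsigned int count, n;

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg))
                return -errno;
        if (!arg.size)
                return 0;       /* context still uses the legacy ring map */

        engines = calloc(1, arg.size);
        if (!engines)
                return -ENOMEM;

        arg.value = (uintptr_t)engines;
        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg)) {
                free(engines);
                return -errno;
        }

        count = (arg.size - sizeof(*engines)) / sizeof(engines->engines[0]);
        for (n = 0; n < count; n++)     /* empty slots read back as INVALID/NONE */
                printf("engine[%u]: class %u, instance %u\n", n,
                       engines->engines[n].engine_class,
                       engines->engines[n].engine_instance);

        free(engines);
        return 0;
}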
1776
1777 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1778                         struct i915_gem_context *ctx,
1779                         struct drm_i915_gem_context_param *args)
1780 {
1781         int ret = 0;
1782
1783         switch (args->param) {
1784         case I915_CONTEXT_PARAM_NO_ZEROMAP:
1785                 if (args->size)
1786                         ret = -EINVAL;
1787                 else if (args->value)
1788                         set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1789                 else
1790                         clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1791                 break;
1792
1793         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1794                 if (args->size)
1795                         ret = -EINVAL;
1796                 else if (args->value)
1797                         i915_gem_context_set_no_error_capture(ctx);
1798                 else
1799                         i915_gem_context_clear_no_error_capture(ctx);
1800                 break;
1801
1802         case I915_CONTEXT_PARAM_BANNABLE:
1803                 if (args->size)
1804                         ret = -EINVAL;
1805                 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1806                         ret = -EPERM;
1807                 else if (args->value)
1808                         i915_gem_context_set_bannable(ctx);
1809                 else
1810                         i915_gem_context_clear_bannable(ctx);
1811                 break;
1812
1813         case I915_CONTEXT_PARAM_RECOVERABLE:
1814                 if (args->size)
1815                         ret = -EINVAL;
1816                 else if (args->value)
1817                         i915_gem_context_set_recoverable(ctx);
1818                 else
1819                         i915_gem_context_clear_recoverable(ctx);
1820                 break;
1821
1822         case I915_CONTEXT_PARAM_PRIORITY:
1823                 {
1824                         s64 priority = args->value;
1825
1826                         if (args->size)
1827                                 ret = -EINVAL;
1828                         else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1829                                 ret = -ENODEV;
1830                         else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1831                                  priority < I915_CONTEXT_MIN_USER_PRIORITY)
1832                                 ret = -EINVAL;
1833                         else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1834                                  !capable(CAP_SYS_NICE))
1835                                 ret = -EPERM;
1836                         else
1837                                 ctx->sched.priority =
1838                                         I915_USER_PRIORITY(priority);
1839                 }
1840                 break;
1841
1842         case I915_CONTEXT_PARAM_SSEU:
1843                 ret = set_sseu(ctx, args);
1844                 break;
1845
1846         case I915_CONTEXT_PARAM_VM:
1847                 ret = set_ppgtt(fpriv, ctx, args);
1848                 break;
1849
1850         case I915_CONTEXT_PARAM_ENGINES:
1851                 ret = set_engines(ctx, args);
1852                 break;
1853
1854         case I915_CONTEXT_PARAM_BAN_PERIOD:
1855         default:
1856                 ret = -EINVAL;
1857                 break;
1858         }
1859
1860         return ret;
1861 }
1862
1863 struct create_ext {
1864         struct i915_gem_context *ctx;
1865         struct drm_i915_file_private *fpriv;
1866 };
1867
1868 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1869 {
1870         struct drm_i915_gem_context_create_ext_setparam local;
1871         const struct create_ext *arg = data;
1872
1873         if (copy_from_user(&local, ext, sizeof(local)))
1874                 return -EFAULT;
1875
1876         if (local.param.ctx_id)
1877                 return -EINVAL;
1878
1879         return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1880 }
1881
1882 static int clone_engines(struct i915_gem_context *dst,
1883                          struct i915_gem_context *src)
1884 {
1885         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1886         struct i915_gem_engines *clone;
1887         bool user_engines;
1888         unsigned long n;
1889
1890         clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1891         if (!clone)
1892                 goto err_unlock;
1893
1894         clone->i915 = dst->i915;
1895         for (n = 0; n < e->num_engines; n++) {
1896                 struct intel_engine_cs *engine;
1897
1898                 if (!e->engines[n]) {
1899                         clone->engines[n] = NULL;
1900                         continue;
1901                 }
1902                 engine = e->engines[n]->engine;
1903
1904                 /*
1905                  * Virtual engines are singletons; they can only exist
1906                  * inside a single context, because they embed their
1907                  * HW context... As each virtual context implies a single
1908                  * timeline (each engine can only dequeue a single request
1909                  * at any time), it would be surprising for two contexts
1910                  * to use the same engine. So let's create a copy of
1911                  * the virtual engine instead.
1912                  */
1913                 if (intel_engine_is_virtual(engine))
1914                         clone->engines[n] =
1915                                 intel_execlists_clone_virtual(dst, engine);
1916                 else
1917                         clone->engines[n] = intel_context_create(dst, engine);
1918                 if (IS_ERR_OR_NULL(clone->engines[n])) {
1919                         __free_engines(clone, n);
1920                         goto err_unlock;
1921                 }
1922         }
1923         clone->num_engines = n;
1924
1925         user_engines = i915_gem_context_user_engines(src);
1926         i915_gem_context_unlock_engines(src);
1927
1928         free_engines(dst->engines);
1929         RCU_INIT_POINTER(dst->engines, clone);
1930         if (user_engines)
1931                 i915_gem_context_set_user_engines(dst);
1932         else
1933                 i915_gem_context_clear_user_engines(dst);
1934         return 0;
1935
1936 err_unlock:
1937         i915_gem_context_unlock_engines(src);
1938         return -ENOMEM;
1939 }
1940
1941 static int clone_flags(struct i915_gem_context *dst,
1942                        struct i915_gem_context *src)
1943 {
1944         dst->user_flags = src->user_flags;
1945         return 0;
1946 }
1947
1948 static int clone_schedattr(struct i915_gem_context *dst,
1949                            struct i915_gem_context *src)
1950 {
1951         dst->sched = src->sched;
1952         return 0;
1953 }
1954
1955 static int clone_sseu(struct i915_gem_context *dst,
1956                       struct i915_gem_context *src)
1957 {
1958         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1959         struct i915_gem_engines *clone;
1960         unsigned long n;
1961         int err;
1962
1963         clone = dst->engines; /* no locking required; sole access */
1964         if (e->num_engines != clone->num_engines) {
1965                 err = -EINVAL;
1966                 goto unlock;
1967         }
1968
1969         for (n = 0; n < e->num_engines; n++) {
1970                 struct intel_context *ce = e->engines[n];
1971
1972                 if (clone->engines[n]->engine->class != ce->engine->class) {
1973                         /* Must have compatible engine maps! */
1974                         err = -EINVAL;
1975                         goto unlock;
1976                 }
1977
1978                 /* serialises with set_sseu */
1979                 err = intel_context_lock_pinned(ce);
1980                 if (err)
1981                         goto unlock;
1982
1983                 clone->engines[n]->sseu = ce->sseu;
1984                 intel_context_unlock_pinned(ce);
1985         }
1986
1987         err = 0;
1988 unlock:
1989         i915_gem_context_unlock_engines(src);
1990         return err;
1991 }
1992
1993 static int clone_timeline(struct i915_gem_context *dst,
1994                           struct i915_gem_context *src)
1995 {
1996         if (src->timeline) {
1997                 GEM_BUG_ON(src->timeline == dst->timeline);
1998
1999                 if (dst->timeline)
2000                         i915_timeline_put(dst->timeline);
2001                 dst->timeline = i915_timeline_get(src->timeline);
2002         }
2003
2004         return 0;
2005 }
2006
2007 static int clone_vm(struct i915_gem_context *dst,
2008                     struct i915_gem_context *src)
2009 {
2010         struct i915_hw_ppgtt *ppgtt;
2011
2012         rcu_read_lock();
2013         do {
2014                 ppgtt = READ_ONCE(src->ppgtt);
2015                 if (!ppgtt)
2016                         break;
2017
2018                 if (!kref_get_unless_zero(&ppgtt->ref))
2019                         continue;
2020
2021                 /*
2022                  * This ppgtt may have been reallocated between
2023                  * the read and the kref, and reassigned to a third
2024                  * context. In order to avoid inadvertent sharing
2025                  * of this ppgtt with that third context (and not
2026                  * src), we have to confirm that we have the same
2027                  * ppgtt after passing through the strong memory
2028                  * barrier implied by a successful
2029                  * kref_get_unless_zero().
2030                  *
2031                  * Once we have acquired the current ppgtt of src,
2032                  * we no longer care if it is released from src, as
2033                  * it cannot be reallocated elsewhere.
2034                  */
2035
2036                 if (ppgtt == READ_ONCE(src->ppgtt))
2037                         break;
2038
2039                 i915_ppgtt_put(ppgtt);
2040         } while (1);
2041         rcu_read_unlock();
2042
2043         if (ppgtt) {
2044                 __assign_ppgtt(dst, ppgtt);
2045                 i915_ppgtt_put(ppgtt);
2046         }
2047
2048         return 0;
2049 }
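
/*
 * Generic sketch of the lookup pattern used above, with hypothetical types
 * (struct obj, obj_release): load an RCU-protected pointer, try to elevate
 * its refcount, then re-check that the slot still holds the same object
 * after the barrier implied by a successful kref_get_unless_zero().
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
        struct kref ref;
        struct rcu_head rcu;
        /* ... payload ... */
};

static void obj_release(struct kref *ref)
{
        struct obj *o = container_of(ref, struct obj, ref);

        kfree_rcu(o, rcu);      /* readers may still be dereferencing the slot */
}

static struct obj *obj_get_live(struct obj **slot)
{
        struct obj *o;

        rcu_read_lock();
        do {
                o = READ_ONCE(*slot);
                if (!o)
                        break;

                if (!kref_get_unless_zero(&o->ref))
                        continue;       /* object was dying; reload the slot */

                if (o == READ_ONCE(*slot))
                        break;          /* still current; safe to hand out */

                /* the slot was reassigned while we raced; drop and retry */
                kref_put(&o->ref, obj_release);
        } while (1);
        rcu_read_unlock();

        return o;
}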
2050
2051 static int create_clone(struct i915_user_extension __user *ext, void *data)
2052 {
2053         static int (* const fn[])(struct i915_gem_context *dst,
2054                                   struct i915_gem_context *src) = {
2055 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2056                 MAP(ENGINES, clone_engines),
2057                 MAP(FLAGS, clone_flags),
2058                 MAP(SCHEDATTR, clone_schedattr),
2059                 MAP(SSEU, clone_sseu),
2060                 MAP(TIMELINE, clone_timeline),
2061                 MAP(VM, clone_vm),
2062 #undef MAP
2063         };
2064         struct drm_i915_gem_context_create_ext_clone local;
2065         const struct create_ext *arg = data;
2066         struct i915_gem_context *dst = arg->ctx;
2067         struct i915_gem_context *src;
2068         int err, bit;
2069
2070         if (copy_from_user(&local, ext, sizeof(local)))
2071                 return -EFAULT;
2072
2073         BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2074                      I915_CONTEXT_CLONE_UNKNOWN);
2075
2076         if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2077                 return -EINVAL;
2078
2079         if (local.rsvd)
2080                 return -EINVAL;
2081
2082         rcu_read_lock();
2083         src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2084         rcu_read_unlock();
2085         if (!src)
2086                 return -ENOENT;
2087
2088         GEM_BUG_ON(src == dst);
2089
2090         for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2091                 if (!(local.flags & BIT(bit)))
2092                         continue;
2093
2094                 err = fn[bit](dst, src);
2095                 if (err)
2096                         return err;
2097         }
2098
2099         return 0;
2100 }
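
/*
 * Illustrative userspace sketch (not part of this driver): creating a new
 * context that shares the VM and engine map of an existing one via the
 * CLONE extension handled above.  uAPI names are assumed to match
 * include/uapi/drm/i915_drm.h; error handling is minimal.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int clone_vm_and_engines(int drm_fd, uint32_t src_ctx, uint32_t *out_ctx)
{
        struct drm_i915_gem_context_create_ext_clone clone = {
                .base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
                .clone_id = src_ctx,
                .flags = I915_CONTEXT_CLONE_VM | I915_CONTEXT_CLONE_ENGINES,
        };
        struct drm_i915_gem_context_create_ext create = {
                .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
                .extensions = (uintptr_t)&clone,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
                return -errno;

        *out_ctx = create.ctx_id;
        return 0;
}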
2101
2102 static const i915_user_extension_fn create_extensions[] = {
2103         [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2104         [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2105 };
2106
2107 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2108 {
2109         return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2110 }
2111
2112 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2113                                   struct drm_file *file)
2114 {
2115         struct drm_i915_private *i915 = to_i915(dev);
2116         struct drm_i915_gem_context_create_ext *args = data;
2117         struct create_ext ext_data;
2118         int ret;
2119
2120         if (!DRIVER_CAPS(i915)->has_logical_contexts)
2121                 return -ENODEV;
2122
2123         if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2124                 return -EINVAL;
2125
2126         ret = i915_terminally_wedged(i915);
2127         if (ret)
2128                 return ret;
2129
2130         ext_data.fpriv = file->driver_priv;
2131         if (client_is_banned(ext_data.fpriv)) {
2132                 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2133                           current->comm,
2134                           pid_nr(get_task_pid(current, PIDTYPE_PID)));
2135                 return -EIO;
2136         }
2137
2138         ret = i915_mutex_lock_interruptible(dev);
2139         if (ret)
2140                 return ret;
2141
2142         ext_data.ctx = i915_gem_create_context(i915, args->flags);
2143         mutex_unlock(&dev->struct_mutex);
2144         if (IS_ERR(ext_data.ctx))
2145                 return PTR_ERR(ext_data.ctx);
2146
2147         if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2148                 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2149                                            create_extensions,
2150                                            ARRAY_SIZE(create_extensions),
2151                                            &ext_data);
2152                 if (ret)
2153                         goto err_ctx;
2154         }
2155
2156         ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
2157         if (ret < 0)
2158                 goto err_ctx;
2159
2160         args->ctx_id = ret;
2161         DRM_DEBUG("HW context %d created\n", args->ctx_id);
2162
2163         return 0;
2164
2165 err_ctx:
2166         mutex_lock(&dev->struct_mutex);
2167         context_close(ext_data.ctx);
2168         mutex_unlock(&dev->struct_mutex);
2169         return ret;
2170 }
2171
2172 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2173                                    struct drm_file *file)
2174 {
2175         struct drm_i915_gem_context_destroy *args = data;
2176         struct drm_i915_file_private *file_priv = file->driver_priv;
2177         struct i915_gem_context *ctx;
2178
2179         if (args->pad != 0)
2180                 return -EINVAL;
2181
2182         if (!args->ctx_id)
2183                 return -ENOENT;
2184
2185         if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2186                 return -EINTR;
2187
2188         ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2189         mutex_unlock(&file_priv->context_idr_lock);
2190         if (!ctx)
2191                 return -ENOENT;
2192
2193         mutex_lock(&dev->struct_mutex);
2194         context_close(ctx);
2195         mutex_unlock(&dev->struct_mutex);
2196
2197         return 0;
2198 }
2199
2200 static int get_sseu(struct i915_gem_context *ctx,
2201                     struct drm_i915_gem_context_param *args)
2202 {
2203         struct drm_i915_gem_context_param_sseu user_sseu;
2204         struct intel_context *ce;
2205         unsigned long lookup;
2206         int err;
2207
2208         if (args->size == 0)
2209                 goto out;
2210         else if (args->size < sizeof(user_sseu))
2211                 return -EINVAL;
2212
2213         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2214                            sizeof(user_sseu)))
2215                 return -EFAULT;
2216
2217         if (user_sseu.rsvd)
2218                 return -EINVAL;
2219
2220         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2221                 return -EINVAL;
2222
2223         lookup = 0;
2224         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2225                 lookup |= LOOKUP_USER_INDEX;
2226
2227         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2228         if (IS_ERR(ce))
2229                 return PTR_ERR(ce);
2230
2231         err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2232         if (err) {
2233                 intel_context_put(ce);
2234                 return err;
2235         }
2236
2237         user_sseu.slice_mask = ce->sseu.slice_mask;
2238         user_sseu.subslice_mask = ce->sseu.subslice_mask;
2239         user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2240         user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2241
2242         intel_context_unlock_pinned(ce);
2243         intel_context_put(ce);
2244
2245         if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2246                          sizeof(user_sseu)))
2247                 return -EFAULT;
2248
2249 out:
2250         args->size = sizeof(user_sseu);
2251
2252         return 0;
2253 }
2254
2255 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2256                                     struct drm_file *file)
2257 {
2258         struct drm_i915_file_private *file_priv = file->driver_priv;
2259         struct drm_i915_gem_context_param *args = data;
2260         struct i915_gem_context *ctx;
2261         int ret = 0;
2262
2263         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2264         if (!ctx)
2265                 return -ENOENT;
2266
2267         switch (args->param) {
2268         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2269                 args->size = 0;
2270                 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2271                 break;
2272
2273         case I915_CONTEXT_PARAM_GTT_SIZE:
2274                 args->size = 0;
2275                 if (ctx->ppgtt)
2276                         args->value = ctx->ppgtt->vm.total;
2277                 else if (to_i915(dev)->mm.aliasing_ppgtt)
2278                         args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
2279                 else
2280                         args->value = to_i915(dev)->ggtt.vm.total;
2281                 break;
2282
2283         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2284                 args->size = 0;
2285                 args->value = i915_gem_context_no_error_capture(ctx);
2286                 break;
2287
2288         case I915_CONTEXT_PARAM_BANNABLE:
2289                 args->size = 0;
2290                 args->value = i915_gem_context_is_bannable(ctx);
2291                 break;
2292
2293         case I915_CONTEXT_PARAM_RECOVERABLE:
2294                 args->size = 0;
2295                 args->value = i915_gem_context_is_recoverable(ctx);
2296                 break;
2297
2298         case I915_CONTEXT_PARAM_PRIORITY:
2299                 args->size = 0;
2300                 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2301                 break;
2302
2303         case I915_CONTEXT_PARAM_SSEU:
2304                 ret = get_sseu(ctx, args);
2305                 break;
2306
2307         case I915_CONTEXT_PARAM_VM:
2308                 ret = get_ppgtt(file_priv, ctx, args);
2309                 break;
2310
2311         case I915_CONTEXT_PARAM_ENGINES:
2312                 ret = get_engines(ctx, args);
2313                 break;
2314
2315         case I915_CONTEXT_PARAM_BAN_PERIOD:
2316         default:
2317                 ret = -EINVAL;
2318                 break;
2319         }
2320
2321         i915_gem_context_put(ctx);
2322         return ret;
2323 }
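
/*
 * Illustrative userspace sketch (not part of this driver): reading one of
 * the simple parameters reported above.  uAPI names are assumed to match
 * include/uapi/drm/i915_drm.h.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int query_gtt_size(int drm_fd, uint32_t ctx_id, uint64_t *total)
{
        struct drm_i915_gem_context_param arg = {
                .ctx_id = ctx_id,
                .param = I915_CONTEXT_PARAM_GTT_SIZE,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg))
                return -errno;

        *total = arg.value;     /* ppgtt, aliasing-ppgtt or ggtt size in bytes */
        return 0;
}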
2324
2325 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2326                                     struct drm_file *file)
2327 {
2328         struct drm_i915_file_private *file_priv = file->driver_priv;
2329         struct drm_i915_gem_context_param *args = data;
2330         struct i915_gem_context *ctx;
2331         int ret;
2332
2333         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2334         if (!ctx)
2335                 return -ENOENT;
2336
2337         ret = ctx_setparam(file_priv, ctx, args);
2338
2339         i915_gem_context_put(ctx);
2340         return ret;
2341 }
2342
2343 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2344                                        void *data, struct drm_file *file)
2345 {
2346         struct drm_i915_private *dev_priv = to_i915(dev);
2347         struct drm_i915_reset_stats *args = data;
2348         struct i915_gem_context *ctx;
2349         int ret;
2350
2351         if (args->flags || args->pad)
2352                 return -EINVAL;
2353
2354         ret = -ENOENT;
2355         rcu_read_lock();
2356         ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2357         if (!ctx)
2358                 goto out;
2359
2360         /*
2361          * We opt for unserialised reads here. This may result in torn values
2362          * in the extremely unlikely event of a GPU hang on this context while
2363          * we are querying its reset statistics. If we need that extra layer
2364          * of protection, we should wrap the hangstats with a seqlock (sketched below).
2365          */
2366
2367         if (capable(CAP_SYS_ADMIN))
2368                 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2369         else
2370                 args->reset_count = 0;
2371
2372         args->batch_active = atomic_read(&ctx->guilty_count);
2373         args->batch_pending = atomic_read(&ctx->active_count);
2374
2375         ret = 0;
2376 out:
2377         rcu_read_unlock();
2378         return ret;
2379 }
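
/*
 * Sketch of the seqlock the comment above alludes to, using hypothetical
 * fields: the reset path would bump the sequence around its updates and the
 * ioctl would retry torn reads.  This is not something the driver does
 * today; it only illustrates the pattern.
 */
#include <linux/seqlock.h>
#include <linux/types.h>

struct hangstats {
        seqlock_t lock;
        unsigned int guilty;
        unsigned int pending;
};

static void hangstats_init(struct hangstats *hs)
{
        seqlock_init(&hs->lock);
}

static void hangstats_record_hang(struct hangstats *hs, bool guilty)
{
        write_seqlock(&hs->lock);
        if (guilty)
                hs->guilty++;
        else
                hs->pending++;
        write_sequnlock(&hs->lock);
}

static void hangstats_read(const struct hangstats *hs,
                           unsigned int *guilty, unsigned int *pending)
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&hs->lock);
                *guilty = hs->guilty;
                *pending = hs->pending;
        } while (read_seqretry(&hs->lock, seq));
}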
2380
2381 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2382 {
2383         struct drm_i915_private *i915 = ctx->i915;
2384         int err = 0;
2385
2386         mutex_lock(&i915->contexts.mutex);
2387
2388         GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2389
2390         if (list_empty(&ctx->hw_id_link)) {
2391                 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2392
2393                 err = assign_hw_id(i915, &ctx->hw_id);
2394                 if (err)
2395                         goto out_unlock;
2396
2397                 list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2398         }
2399
2400         GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2401         atomic_inc(&ctx->hw_id_pin_count);
2402
2403 out_unlock:
2404         mutex_unlock(&i915->contexts.mutex);
2405         return err;
2406 }
2407
2408 /* GEM context-engines iterator: for_each_gem_engine() */
2409 struct intel_context *
2410 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2411 {
2412         const struct i915_gem_engines *e = it->engines;
2413         struct intel_context *ctx;
2414
2415         do {
2416                 if (it->idx >= e->num_engines)
2417                         return NULL;
2418
2419                 ctx = e->engines[it->idx++];
2420         } while (!ctx);
2421
2422         return ctx;
2423 }
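
/*
 * Hypothetical in-kernel user of the iterator above, assuming the
 * for_each_gem_engine() helper from i915_gem_context.h that wraps
 * i915_gem_engines_iter_next(): walk every populated slot of a context's
 * engine map while holding the engines mutex.
 */
static unsigned int count_context_engines(struct i915_gem_context *ctx)
{
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
        unsigned int count = 0;

        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
                count++;        /* NULL slots are skipped by the iterator */
        i915_gem_context_unlock_engines(ctx);

        return count;
}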
2424
2425 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2426 #include "selftests/mock_context.c"
2427 #include "selftests/i915_gem_context.c"
2428 #endif
2429
2430 static void i915_global_gem_context_shrink(void)
2431 {
2432         kmem_cache_shrink(global.slab_luts);
2433 }
2434
2435 static void i915_global_gem_context_exit(void)
2436 {
2437         kmem_cache_destroy(global.slab_luts);
2438 }
2439
2440 static struct i915_global_gem_context global = { {
2441         .shrink = i915_global_gem_context_shrink,
2442         .exit = i915_global_gem_context_exit,
2443 } };
2444
2445 int __init i915_global_gem_context_init(void)
2446 {
2447         global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2448         if (!global.slab_luts)
2449                 return -ENOMEM;
2450
2451         i915_global_register(&global.base);
2452         return 0;
2453 }