1 /*
2  * Copyright © 2011-2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Ben Widawsky <ben@bwidawsk.net>
25  *
26  */
27
28 /*
29  * This file implements HW context support. On gen5+ a HW context consists of an
30  * opaque GPU object which is referenced at times of context saves and restores.
31  * With RC6 enabled, the context is also referenced as the GPU enters and exits
32  * RC6 (the GPU has its own internal power context, except on gen5). Though
33  * something like a context does exist for the media ring, the code only
34  * supports contexts for the render ring.
35  *
36  * In software, there is a distinction between contexts created by the user,
37  * and the default HW context. The default HW context is used by GPU clients
38  * that do not request setup of their own hardware context. The default
39  * context's state is never restored to help prevent programming errors. This
40  * would happen if a client ran and piggy-backed off another client's GPU state.
41  * The default context only exists to give the GPU some offset to load as the
42  * current one, to invoke a save of the context we actually care about. In fact, the
43  * code could likely be constructed, albeit in a more complicated fashion, to
44  * never use the default context, though that limits the driver's ability to
45  * swap out, and/or destroy other contexts.
46  *
47  * All other contexts are created as a request by the GPU client. These contexts
48  * store GPU state, and thus allow GPU clients to not re-emit state (and
49  * potentially query certain state) at any time. The kernel driver makes
50  * certain that the appropriate commands are inserted.
51  *
52  * The context life cycle is semi-complicated in that context BOs may live
53  * longer than the context itself because of the way the hardware and object
54  * tracking work. Below is a very crude representation of the state machine
55  * describing the context life.
56  *                                         refcount     pincount     active
57  * S0: initial state                          0            0           0
58  * S1: context created                        1            0           0
59  * S2: context is currently running           2            1           X
60  * S3: GPU referenced, but not current        2            0           1
61  * S4: context is current, but destroyed      1            1           0
62  * S5: like S3, but destroyed                 1            0           1
63  *
64  * The most common (but not all) transitions:
65  * S0->S1: client creates a context
66  * S1->S2: client submits execbuf with context
67  * S2->S3: another client submits an execbuf with a different context
68  * S3->S1: context object was retired
69  * S3->S2: client submits another execbuf
70  * S2->S4: context destroy called with current context
71  * S3->S5->S0: destroy path
72  * S4->S5->S0: destroy path on current context
73  *
74  * There are two confusing terms used above:
75  *  The "current context" means the context which is currently running on the
76  *  GPU. The GPU has loaded its state already and has stored away the gtt
77  *  offset of the BO. The GPU is not actively referencing the data at this
78  *  offset, but it will on the next context switch. The only way to avoid this
79  *  is to do a GPU reset.
80  *
81  *  An "active context' is one which was previously the "current context" and is
82  *  on the active list waiting for the next context switch to occur. Until this
83  *  happens, the object must remain at the same gtt offset. It is therefore
84  *  possible to destroy a context while it is still active.
85  *
86  */
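/*
 * A minimal userspace sketch (illustrative only, not part of this file) of
 * the S0->S1 and destroy transitions described above, using the context
 * create/destroy uAPI serviced by this driver:
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	// create.ctx_id now names the new context (S0->S1); execbufs that
 *	// reference it move it through S2/S3 as described above.
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *
 * Error handling is omitted; fd is an open i915 render node.
 */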
87
88 #include <linux/log2.h>
89
90 #include <drm/i915_drm.h>
91
92 #include "gt/intel_lrc_reg.h"
93
94 #include "i915_drv.h"
95 #include "i915_globals.h"
96 #include "i915_trace.h"
97 #include "i915_user_extensions.h"
98
99 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
100
101 static struct i915_global_gem_context {
102         struct i915_global base;
103         struct kmem_cache *slab_luts;
104 } global;
105
106 struct i915_lut_handle *i915_lut_handle_alloc(void)
107 {
108         return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
109 }
110
111 void i915_lut_handle_free(struct i915_lut_handle *lut)
112 {
113         return kmem_cache_free(global.slab_luts, lut);
114 }
115
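/*
 * lut_close() tears down the per-context lookup state: it frees every
 * i915_lut_handle on ctx->handles_list and empties the handles_vma radix
 * tree, dropping each vma's open_count and releasing the backing object
 * unless it is still active.
 */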
116 static void lut_close(struct i915_gem_context *ctx)
117 {
118         struct i915_lut_handle *lut, *ln;
119         struct radix_tree_iter iter;
120         void __rcu **slot;
121
122         list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
123                 list_del(&lut->obj_link);
124                 i915_lut_handle_free(lut);
125         }
126         INIT_LIST_HEAD(&ctx->handles_list);
127
128         rcu_read_lock();
129         radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
130                 struct i915_vma *vma = rcu_dereference_raw(*slot);
131
132                 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
133
134                 vma->open_count--;
135                 __i915_gem_object_release_unless_active(vma->obj);
136         }
137         rcu_read_unlock();
138 }
139
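/*
 * Map a userspace engine specifier onto this context's intel_context: either
 * a { class, instance } pair resolved via intel_engine_lookup_user(), or,
 * when LOOKUP_USER_INDEX is passed and the context has a user-supplied
 * engine map, a plain index into that map.
 */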
140 static struct intel_context *
141 lookup_user_engine(struct i915_gem_context *ctx,
142                    unsigned long flags,
143                    const struct i915_engine_class_instance *ci)
144 #define LOOKUP_USER_INDEX BIT(0)
145 {
146         int idx;
147
148         if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
149                 return ERR_PTR(-EINVAL);
150
151         if (!i915_gem_context_user_engines(ctx)) {
152                 struct intel_engine_cs *engine;
153
154                 engine = intel_engine_lookup_user(ctx->i915,
155                                                   ci->engine_class,
156                                                   ci->engine_instance);
157                 if (!engine)
158                         return ERR_PTR(-EINVAL);
159
160                 idx = engine->id;
161         } else {
162                 idx = ci->engine_instance;
163         }
164
165         return i915_gem_context_get_engine(ctx, idx);
166 }
167
168 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
169 {
170         unsigned int max;
171
172         lockdep_assert_held(&i915->contexts.mutex);
173
174         if (INTEL_GEN(i915) >= 11)
175                 max = GEN11_MAX_CONTEXT_HW_ID;
176         else if (USES_GUC_SUBMISSION(i915))
177                 /*
178                  * When using GuC in proxy submission, GuC consumes the
179                  * highest bit in the context id to indicate proxy submission.
180                  */
181                 max = MAX_GUC_CONTEXT_HW_ID;
182         else
183                 max = MAX_CONTEXT_HW_ID;
184
185         return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
186 }
187
188 static int steal_hw_id(struct drm_i915_private *i915)
189 {
190         struct i915_gem_context *ctx, *cn;
191         LIST_HEAD(pinned);
192         int id = -ENOSPC;
193
194         lockdep_assert_held(&i915->contexts.mutex);
195
196         list_for_each_entry_safe(ctx, cn,
197                                  &i915->contexts.hw_id_list, hw_id_link) {
198                 if (atomic_read(&ctx->hw_id_pin_count)) {
199                         list_move_tail(&ctx->hw_id_link, &pinned);
200                         continue;
201                 }
202
203                 GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
204                 list_del_init(&ctx->hw_id_link);
205                 id = ctx->hw_id;
206                 break;
207         }
208
209         /*
210          * Remember how far we got on the last repossession scan, so the
211          * list is kept in a "least recently scanned" order.
212          */
213         list_splice_tail(&pinned, &i915->contexts.hw_id_list);
214         return id;
215 }
216
217 static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
218 {
219         int ret;
220
221         lockdep_assert_held(&i915->contexts.mutex);
222
223         /*
224          * We prefer to steal/stall ourselves and our users over that of the
225          * entire system. That may be a little unfair to our users, and
226          * even hurt high priority clients. The choice is whether to oomkill
227          * something else, or steal a context id.
228          */
229         ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
230         if (unlikely(ret < 0)) {
231                 ret = steal_hw_id(i915);
232                 if (ret < 0) /* once again for the correct errno code */
233                         ret = new_hw_id(i915, GFP_KERNEL);
234                 if (ret < 0)
235                         return ret;
236         }
237
238         *out = ret;
239         return 0;
240 }
241
242 static void release_hw_id(struct i915_gem_context *ctx)
243 {
244         struct drm_i915_private *i915 = ctx->i915;
245
246         if (list_empty(&ctx->hw_id_link))
247                 return;
248
249         mutex_lock(&i915->contexts.mutex);
250         if (!list_empty(&ctx->hw_id_link)) {
251                 ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
252                 list_del_init(&ctx->hw_id_link);
253         }
254         mutex_unlock(&i915->contexts.mutex);
255 }
256
257 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
258 {
259         while (count--) {
260                 if (!e->engines[count])
261                         continue;
262
263                 intel_context_put(e->engines[count]);
264         }
265         kfree(e);
266 }
267
268 static void free_engines(struct i915_gem_engines *e)
269 {
270         __free_engines(e, e->num_engines);
271 }
272
273 static void free_engines_rcu(struct work_struct *wrk)
274 {
275         struct i915_gem_engines *e =
276                 container_of(wrk, struct i915_gem_engines, rcu.work);
277         struct drm_i915_private *i915 = e->i915;
278
279         mutex_lock(&i915->drm.struct_mutex);
280         free_engines(e);
281         mutex_unlock(&i915->drm.struct_mutex);
282 }
283
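/*
 * Build the default engine map for a new context: one intel_context per
 * physical engine, indexed by engine id, matching the legacy ring layout.
 */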
284 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
285 {
286         struct intel_engine_cs *engine;
287         struct i915_gem_engines *e;
288         enum intel_engine_id id;
289
290         e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
291         if (!e)
292                 return ERR_PTR(-ENOMEM);
293
294         e->i915 = ctx->i915;
295         for_each_engine(engine, ctx->i915, id) {
296                 struct intel_context *ce;
297
298                 ce = intel_context_create(ctx, engine);
299                 if (IS_ERR(ce)) {
300                         __free_engines(e, id);
301                         return ERR_CAST(ce);
302                 }
303
304                 e->engines[id] = ce;
305         }
306         e->num_engines = id;
307
308         return e;
309 }
310
311 static void i915_gem_context_free(struct i915_gem_context *ctx)
312 {
313         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
314         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
315
316         release_hw_id(ctx);
317         i915_ppgtt_put(ctx->ppgtt);
318
319         free_engines(rcu_access_pointer(ctx->engines));
320         mutex_destroy(&ctx->engines_mutex);
321
322         if (ctx->timeline)
323                 i915_timeline_put(ctx->timeline);
324
325         kfree(ctx->name);
326         put_pid(ctx->pid);
327
328         list_del(&ctx->link);
329         mutex_destroy(&ctx->mutex);
330
331         kfree_rcu(ctx, rcu);
332 }
333
334 static void contexts_free(struct drm_i915_private *i915)
335 {
336         struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
337         struct i915_gem_context *ctx, *cn;
338
339         lockdep_assert_held(&i915->drm.struct_mutex);
340
341         llist_for_each_entry_safe(ctx, cn, freed, free_link)
342                 i915_gem_context_free(ctx);
343 }
344
345 static void contexts_free_first(struct drm_i915_private *i915)
346 {
347         struct i915_gem_context *ctx;
348         struct llist_node *freed;
349
350         lockdep_assert_held(&i915->drm.struct_mutex);
351
352         freed = llist_del_first(&i915->contexts.free_list);
353         if (!freed)
354                 return;
355
356         ctx = container_of(freed, typeof(*ctx), free_link);
357         i915_gem_context_free(ctx);
358 }
359
360 static void contexts_free_worker(struct work_struct *work)
361 {
362         struct drm_i915_private *i915 =
363                 container_of(work, typeof(*i915), contexts.free_work);
364
365         mutex_lock(&i915->drm.struct_mutex);
366         contexts_free(i915);
367         mutex_unlock(&i915->drm.struct_mutex);
368 }
369
370 void i915_gem_context_release(struct kref *ref)
371 {
372         struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
373         struct drm_i915_private *i915 = ctx->i915;
374
375         trace_i915_context_free(ctx);
376         if (llist_add(&ctx->free_link, &i915->contexts.free_list))
377                 queue_work(i915->wq, &i915->contexts.free_work);
378 }
379
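/*
 * context_close() is called when userspace drops its last handle on the
 * context: the hw_id is returned to the pool, the handle->vma LUT is torn
 * down, and the reference held on behalf of the handle is released. The
 * context object itself may live on until its outstanding requests retire.
 */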
380 static void context_close(struct i915_gem_context *ctx)
381 {
382         i915_gem_context_set_closed(ctx);
383
384         /*
385          * This context will never again be assigned to HW, so we can
386          * reuse its ID for the next context.
387          */
388         release_hw_id(ctx);
389
390         /*
391          * The LUT uses the VMA as a backpointer to unref the object,
392          * so we need to clear the LUT before we close all the VMA (inside
393          * the ppgtt).
394          */
395         lut_close(ctx);
396
397         ctx->file_priv = ERR_PTR(-EBADF);
398         i915_gem_context_put(ctx);
399 }
400
401 static u32 default_desc_template(const struct drm_i915_private *i915,
402                                  const struct i915_hw_ppgtt *ppgtt)
403 {
404         u32 address_mode;
405         u32 desc;
406
407         desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
408
409         address_mode = INTEL_LEGACY_32B_CONTEXT;
410         if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
411                 address_mode = INTEL_LEGACY_64B_CONTEXT;
412         desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
413
414         if (IS_GEN(i915, 8))
415                 desc |= GEN8_CTX_L3LLC_COHERENT;
416
417         /* TODO: WaDisableLiteRestore when we start using semaphore
418          * signalling between Command Streamers
419          * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
420          */
421
422         return desc;
423 }
424
425 static struct i915_gem_context *
426 __create_context(struct drm_i915_private *dev_priv)
427 {
428         struct i915_gem_context *ctx;
429         struct i915_gem_engines *e;
430         int err;
431         int i;
432
433         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
434         if (!ctx)
435                 return ERR_PTR(-ENOMEM);
436
437         kref_init(&ctx->ref);
438         list_add_tail(&ctx->link, &dev_priv->contexts.list);
439         ctx->i915 = dev_priv;
440         ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
441         mutex_init(&ctx->mutex);
442
443         mutex_init(&ctx->engines_mutex);
444         e = default_engines(ctx);
445         if (IS_ERR(e)) {
446                 err = PTR_ERR(e);
447                 goto err_free;
448         }
449         RCU_INIT_POINTER(ctx->engines, e);
450
451         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
452         INIT_LIST_HEAD(&ctx->handles_list);
453         INIT_LIST_HEAD(&ctx->hw_id_link);
454
455         /* NB: Mark all slices as needing a remap so that when the context first
456          * loads it will restore whatever remap state already exists. If there
457          * is no remap info, it will be a NOP. */
458         ctx->remap_slice = ALL_L3_SLICES(dev_priv);
459
460         i915_gem_context_set_bannable(ctx);
461         i915_gem_context_set_recoverable(ctx);
462
463         ctx->ring_size = 4 * PAGE_SIZE;
464         ctx->desc_template =
465                 default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
466
467         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
468                 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
469
470         return ctx;
471
472 err_free:
473         kfree(ctx);
474         return ERR_PTR(err);
475 }
476
477 static struct i915_hw_ppgtt *
478 __set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
479 {
480         struct i915_hw_ppgtt *old = ctx->ppgtt;
481
482         ctx->ppgtt = i915_ppgtt_get(ppgtt);
483         ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
484
485         return old;
486 }
487
488 static void __assign_ppgtt(struct i915_gem_context *ctx,
489                            struct i915_hw_ppgtt *ppgtt)
490 {
491         if (ppgtt == ctx->ppgtt)
492                 return;
493
494         ppgtt = __set_ppgtt(ctx, ppgtt);
495         if (ppgtt)
496                 i915_ppgtt_put(ppgtt);
497 }
498
499 static struct i915_gem_context *
500 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
501 {
502         struct i915_gem_context *ctx;
503
504         lockdep_assert_held(&dev_priv->drm.struct_mutex);
505
506         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
507             !HAS_EXECLISTS(dev_priv))
508                 return ERR_PTR(-EINVAL);
509
510         /* Reap the most stale context */
511         contexts_free_first(dev_priv);
512
513         ctx = __create_context(dev_priv);
514         if (IS_ERR(ctx))
515                 return ctx;
516
517         if (HAS_FULL_PPGTT(dev_priv)) {
518                 struct i915_hw_ppgtt *ppgtt;
519
520                 ppgtt = i915_ppgtt_create(dev_priv);
521                 if (IS_ERR(ppgtt)) {
522                         DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
523                                          PTR_ERR(ppgtt));
524                         context_close(ctx);
525                         return ERR_CAST(ppgtt);
526                 }
527
528                 __assign_ppgtt(ctx, ppgtt);
529                 i915_ppgtt_put(ppgtt);
530         }
531
532         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
533                 struct i915_timeline *timeline;
534
535                 timeline = i915_timeline_create(dev_priv, NULL);
536                 if (IS_ERR(timeline)) {
537                         context_close(ctx);
538                         return ERR_CAST(timeline);
539                 }
540
541                 ctx->timeline = timeline;
542         }
543
544         trace_i915_context_create(ctx);
545
546         return ctx;
547 }
548
549 /**
550  * i915_gem_context_create_gvt - create a GVT GEM context
551  * @dev: drm device *
552  *
553  * This function is used to create a GVT specific GEM context.
554  *
555  * Returns:
556  * pointer to i915_gem_context on success, error pointer if failed
557  *
558  */
559 struct i915_gem_context *
560 i915_gem_context_create_gvt(struct drm_device *dev)
561 {
562         struct i915_gem_context *ctx;
563         int ret;
564
565         if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
566                 return ERR_PTR(-ENODEV);
567
568         ret = i915_mutex_lock_interruptible(dev);
569         if (ret)
570                 return ERR_PTR(ret);
571
572         ctx = i915_gem_create_context(to_i915(dev), 0);
573         if (IS_ERR(ctx))
574                 goto out;
575
576         ret = i915_gem_context_pin_hw_id(ctx);
577         if (ret) {
578                 context_close(ctx);
579                 ctx = ERR_PTR(ret);
580                 goto out;
581         }
582
583         ctx->file_priv = ERR_PTR(-EBADF);
584         i915_gem_context_set_closed(ctx); /* not user accessible */
585         i915_gem_context_clear_bannable(ctx);
586         i915_gem_context_set_force_single_submission(ctx);
587         if (!USES_GUC_SUBMISSION(to_i915(dev)))
588                 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
589
590         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
591 out:
592         mutex_unlock(&dev->struct_mutex);
593         return ctx;
594 }
595
596 static void
597 destroy_kernel_context(struct i915_gem_context **ctxp)
598 {
599         struct i915_gem_context *ctx;
600
601         /* Keep the context ref so that we can free it immediately ourselves */
602         ctx = i915_gem_context_get(fetch_and_zero(ctxp));
603         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
604
605         context_close(ctx);
606         i915_gem_context_free(ctx);
607 }
608
609 struct i915_gem_context *
610 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
611 {
612         struct i915_gem_context *ctx;
613         int err;
614
615         ctx = i915_gem_create_context(i915, 0);
616         if (IS_ERR(ctx))
617                 return ctx;
618
619         err = i915_gem_context_pin_hw_id(ctx);
620         if (err) {
621                 destroy_kernel_context(&ctx);
622                 return ERR_PTR(err);
623         }
624
625         i915_gem_context_clear_bannable(ctx);
626         ctx->sched.priority = I915_USER_PRIORITY(prio);
627         ctx->ring_size = PAGE_SIZE;
628
629         GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
630
631         return ctx;
632 }
633
634 static void init_contexts(struct drm_i915_private *i915)
635 {
636         mutex_init(&i915->contexts.mutex);
637         INIT_LIST_HEAD(&i915->contexts.list);
638
639         /* Using the simple ida interface, the max is limited by sizeof(int) */
640         BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
641         BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
642         ida_init(&i915->contexts.hw_ida);
643         INIT_LIST_HEAD(&i915->contexts.hw_id_list);
644
645         INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
646         init_llist_head(&i915->contexts.free_list);
647 }
648
649 static bool needs_preempt_context(struct drm_i915_private *i915)
650 {
651         return HAS_EXECLISTS(i915);
652 }
653
654 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
655 {
656         struct i915_gem_context *ctx;
657
658         /* Reassure ourselves we are only called once */
659         GEM_BUG_ON(dev_priv->kernel_context);
660         GEM_BUG_ON(dev_priv->preempt_context);
661
662         intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
663         init_contexts(dev_priv);
664
665         /* lowest priority; idle task */
666         ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
667         if (IS_ERR(ctx)) {
668                 DRM_ERROR("Failed to create default global context\n");
669                 return PTR_ERR(ctx);
670         }
671         /*
672          * For easy recognisability, we want the kernel context to be 0 and then
673          * all user contexts will have non-zero hw_id. Kernel contexts are
674          * permanently pinned, so that we never suffer a stall and can
675          * use them from any allocation context (e.g. for evicting other
676          * contexts and from inside the shrinker).
677          */
678         GEM_BUG_ON(ctx->hw_id);
679         GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
680         dev_priv->kernel_context = ctx;
681
682         /* highest priority; preempting task */
683         if (needs_preempt_context(dev_priv)) {
684                 ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
685                 if (!IS_ERR(ctx))
686                         dev_priv->preempt_context = ctx;
687                 else
688                         DRM_ERROR("Failed to create preempt context; disabling preemption\n");
689         }
690
691         DRM_DEBUG_DRIVER("%s context support initialized\n",
692                          DRIVER_CAPS(dev_priv)->has_logical_contexts ?
693                          "logical" : "fake");
694         return 0;
695 }
696
697 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
698 {
699         struct intel_engine_cs *engine;
700         enum intel_engine_id id;
701
702         lockdep_assert_held(&dev_priv->drm.struct_mutex);
703
704         for_each_engine(engine, dev_priv, id)
705                 intel_engine_lost_context(engine);
706 }
707
708 void i915_gem_contexts_fini(struct drm_i915_private *i915)
709 {
710         lockdep_assert_held(&i915->drm.struct_mutex);
711
712         if (i915->preempt_context)
713                 destroy_kernel_context(&i915->preempt_context);
714         destroy_kernel_context(&i915->kernel_context);
715
716         /* Must free all deferred contexts (via flush_workqueue) first */
717         GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
718         ida_destroy(&i915->contexts.hw_ida);
719 }
720
721 static int context_idr_cleanup(int id, void *p, void *data)
722 {
723         context_close(p);
724         return 0;
725 }
726
727 static int vm_idr_cleanup(int id, void *p, void *data)
728 {
729         i915_ppgtt_put(p);
730         return 0;
731 }
732
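/*
 * Publish a freshly created context to userspace: record the owning file and
 * pid, give the context a human-readable name for debug output, and allocate
 * the id by which the client will refer to it in the context_idr.
 */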
733 static int gem_context_register(struct i915_gem_context *ctx,
734                                 struct drm_i915_file_private *fpriv)
735 {
736         int ret;
737
738         ctx->file_priv = fpriv;
739         if (ctx->ppgtt)
740                 ctx->ppgtt->vm.file = fpriv;
741
742         ctx->pid = get_task_pid(current, PIDTYPE_PID);
743         ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
744                               current->comm, pid_nr(ctx->pid));
745         if (!ctx->name) {
746                 ret = -ENOMEM;
747                 goto err_pid;
748         }
749
750         /* And finally expose ourselves to userspace via the idr */
751         mutex_lock(&fpriv->context_idr_lock);
752         ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
753         mutex_unlock(&fpriv->context_idr_lock);
754         if (ret >= 0)
755                 goto out;
756
757         kfree(fetch_and_zero(&ctx->name));
758 err_pid:
759         put_pid(fetch_and_zero(&ctx->pid));
760 out:
761         return ret;
762 }
763
764 int i915_gem_context_open(struct drm_i915_private *i915,
765                           struct drm_file *file)
766 {
767         struct drm_i915_file_private *file_priv = file->driver_priv;
768         struct i915_gem_context *ctx;
769         int err;
770
771         mutex_init(&file_priv->context_idr_lock);
772         mutex_init(&file_priv->vm_idr_lock);
773
774         idr_init(&file_priv->context_idr);
775         idr_init_base(&file_priv->vm_idr, 1);
776
777         mutex_lock(&i915->drm.struct_mutex);
778         ctx = i915_gem_create_context(i915, 0);
779         mutex_unlock(&i915->drm.struct_mutex);
780         if (IS_ERR(ctx)) {
781                 err = PTR_ERR(ctx);
782                 goto err;
783         }
784
785         err = gem_context_register(ctx, file_priv);
786         if (err < 0)
787                 goto err_ctx;
788
789         GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
790         GEM_BUG_ON(err > 0);
791
792         return 0;
793
794 err_ctx:
795         mutex_lock(&i915->drm.struct_mutex);
796         context_close(ctx);
797         mutex_unlock(&i915->drm.struct_mutex);
798 err:
799         idr_destroy(&file_priv->vm_idr);
800         idr_destroy(&file_priv->context_idr);
801         mutex_destroy(&file_priv->vm_idr_lock);
802         mutex_destroy(&file_priv->context_idr_lock);
803         return err;
804 }
805
806 void i915_gem_context_close(struct drm_file *file)
807 {
808         struct drm_i915_file_private *file_priv = file->driver_priv;
809
810         lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
811
812         idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
813         idr_destroy(&file_priv->context_idr);
814         mutex_destroy(&file_priv->context_idr_lock);
815
816         idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
817         idr_destroy(&file_priv->vm_idr);
818         mutex_destroy(&file_priv->vm_idr_lock);
819 }
820
821 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
822                              struct drm_file *file)
823 {
824         struct drm_i915_private *i915 = to_i915(dev);
825         struct drm_i915_gem_vm_control *args = data;
826         struct drm_i915_file_private *file_priv = file->driver_priv;
827         struct i915_hw_ppgtt *ppgtt;
828         int err;
829
830         if (!HAS_FULL_PPGTT(i915))
831                 return -ENODEV;
832
833         if (args->flags)
834                 return -EINVAL;
835
836         ppgtt = i915_ppgtt_create(i915);
837         if (IS_ERR(ppgtt))
838                 return PTR_ERR(ppgtt);
839
840         ppgtt->vm.file = file_priv;
841
842         if (args->extensions) {
843                 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
844                                            NULL, 0,
845                                            ppgtt);
846                 if (err)
847                         goto err_put;
848         }
849
850         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
851         if (err)
852                 goto err_put;
853
854         err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
855         if (err < 0)
856                 goto err_unlock;
857
858         GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
859
860         mutex_unlock(&file_priv->vm_idr_lock);
861
862         args->vm_id = err;
863         return 0;
864
865 err_unlock:
866         mutex_unlock(&file_priv->vm_idr_lock);
867 err_put:
868         i915_ppgtt_put(ppgtt);
869         return err;
870 }
871
872 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
873                               struct drm_file *file)
874 {
875         struct drm_i915_file_private *file_priv = file->driver_priv;
876         struct drm_i915_gem_vm_control *args = data;
877         struct i915_hw_ppgtt *ppgtt;
878         int err;
879         u32 id;
880
881         if (args->flags)
882                 return -EINVAL;
883
884         if (args->extensions)
885                 return -EINVAL;
886
887         id = args->vm_id;
888         if (!id)
889                 return -ENOENT;
890
891         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
892         if (err)
893                 return err;
894
895         ppgtt = idr_remove(&file_priv->vm_idr, id);
896
897         mutex_unlock(&file_priv->vm_idr_lock);
898         if (!ppgtt)
899                 return -ENOENT;
900
901         i915_ppgtt_put(ppgtt);
902         return 0;
903 }
904
905 struct context_barrier_task {
906         struct i915_active base;
907         void (*task)(void *data);
908         void *data;
909 };
910
911 static void cb_retire(struct i915_active *base)
912 {
913         struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
914
915         if (cb->task)
916                 cb->task(cb->data);
917
918         i915_active_fini(&cb->base);
919         kfree(cb);
920 }
921
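/*
 * context_barrier_task: queue a request (optionally populated by @emit) on
 * every pinned engine of @ctx and invoke @task once all of those requests
 * have retired, i.e. once the GPU can no longer be using the old context
 * state. Used by set_ppgtt() below to defer releasing the previous ppgtt.
 */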
922 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
923 static int context_barrier_task(struct i915_gem_context *ctx,
924                                 intel_engine_mask_t engines,
925                                 int (*emit)(struct i915_request *rq, void *data),
926                                 void (*task)(void *data),
927                                 void *data)
928 {
929         struct drm_i915_private *i915 = ctx->i915;
930         struct context_barrier_task *cb;
931         struct i915_gem_engines_iter it;
932         struct intel_context *ce;
933         int err = 0;
934
935         lockdep_assert_held(&i915->drm.struct_mutex);
936         GEM_BUG_ON(!task);
937
938         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
939         if (!cb)
940                 return -ENOMEM;
941
942         i915_active_init(i915, &cb->base, cb_retire);
943         i915_active_acquire(&cb->base);
944
945         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
946                 struct i915_request *rq;
947
948                 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
949                                        ce->engine->mask)) {
950                         err = -ENXIO;
951                         break;
952                 }
953
954                 if (!(ce->engine->mask & engines) || !ce->state)
955                         continue;
956
957                 rq = intel_context_create_request(ce);
958                 if (IS_ERR(rq)) {
959                         err = PTR_ERR(rq);
960                         break;
961                 }
962
963                 err = 0;
964                 if (emit)
965                         err = emit(rq, data);
966                 if (err == 0)
967                         err = i915_active_ref(&cb->base, rq->fence.context, rq);
968
969                 i915_request_add(rq);
970                 if (err)
971                         break;
972         }
973         i915_gem_context_unlock_engines(ctx);
974
975         cb->task = err ? NULL : task; /* caller needs to unwind instead */
976         cb->data = data;
977
978         i915_active_release(&cb->base);
979
980         return err;
981 }
982
983 static int get_ppgtt(struct drm_i915_file_private *file_priv,
984                      struct i915_gem_context *ctx,
985                      struct drm_i915_gem_context_param *args)
986 {
987         struct i915_hw_ppgtt *ppgtt;
988         int ret;
989
990         if (!ctx->ppgtt)
991                 return -ENODEV;
992
993         /* XXX rcu acquire? */
994         ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
995         if (ret)
996                 return ret;
997
998         ppgtt = i915_ppgtt_get(ctx->ppgtt);
999         mutex_unlock(&ctx->i915->drm.struct_mutex);
1000
1001         ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1002         if (ret)
1003                 goto err_put;
1004
1005         ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
1006         GEM_BUG_ON(!ret);
1007         if (ret < 0)
1008                 goto err_unlock;
1009
1010         i915_ppgtt_get(ppgtt);
1011
1012         args->size = 0;
1013         args->value = ret;
1014
1015         ret = 0;
1016 err_unlock:
1017         mutex_unlock(&file_priv->vm_idr_lock);
1018 err_put:
1019         i915_ppgtt_put(ppgtt);
1020         return ret;
1021 }
1022
1023 static void set_ppgtt_barrier(void *data)
1024 {
1025         struct i915_hw_ppgtt *old = data;
1026
1027         if (INTEL_GEN(old->vm.i915) < 8)
1028                 gen6_ppgtt_unpin_all(old);
1029
1030         i915_ppgtt_put(old);
1031 }
1032
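/*
 * Emit the MI_LOAD_REGISTER_IMM writes that point the ring's PDP registers
 * at the new page directories: a single PDP pair for a 4-level ppgtt, all
 * GEN8_3LVL_PDPES entries for a 3-level execlists ppgtt, or simply pin the
 * gen6/7 ppgtt which is not part of the legacy context image.
 */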
1033 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1034 {
1035         struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
1036         struct intel_engine_cs *engine = rq->engine;
1037         u32 base = engine->mmio_base;
1038         u32 *cs;
1039         int i;
1040
1041         if (i915_vm_is_4lvl(&ppgtt->vm)) {
1042                 const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
1043
1044                 cs = intel_ring_begin(rq, 6);
1045                 if (IS_ERR(cs))
1046                         return PTR_ERR(cs);
1047
1048                 *cs++ = MI_LOAD_REGISTER_IMM(2);
1049
1050                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1051                 *cs++ = upper_32_bits(pd_daddr);
1052                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1053                 *cs++ = lower_32_bits(pd_daddr);
1054
1055                 *cs++ = MI_NOOP;
1056                 intel_ring_advance(rq, cs);
1057         } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1058                 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1059                 if (IS_ERR(cs))
1060                         return PTR_ERR(cs);
1061
1062                 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1063                 for (i = GEN8_3LVL_PDPES; i--; ) {
1064                         const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1065
1066                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1067                         *cs++ = upper_32_bits(pd_daddr);
1068                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1069                         *cs++ = lower_32_bits(pd_daddr);
1070                 }
1071                 *cs++ = MI_NOOP;
1072                 intel_ring_advance(rq, cs);
1073         } else {
1074                 /* ppGTT is not part of the legacy context image */
1075                 gen6_ppgtt_pin(ppgtt);
1076         }
1077
1078         return 0;
1079 }
1080
1081 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1082                      struct i915_gem_context *ctx,
1083                      struct drm_i915_gem_context_param *args)
1084 {
1085         struct i915_hw_ppgtt *ppgtt, *old;
1086         int err;
1087
1088         if (args->size)
1089                 return -EINVAL;
1090
1091         if (!ctx->ppgtt)
1092                 return -ENODEV;
1093
1094         if (upper_32_bits(args->value))
1095                 return -ENOENT;
1096
1097         err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1098         if (err)
1099                 return err;
1100
1101         ppgtt = idr_find(&file_priv->vm_idr, args->value);
1102         if (ppgtt)
1103                 i915_ppgtt_get(ppgtt);
1104         mutex_unlock(&file_priv->vm_idr_lock);
1105         if (!ppgtt)
1106                 return -ENOENT;
1107
1108         err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1109         if (err)
1110                 goto out;
1111
1112         if (ppgtt == ctx->ppgtt)
1113                 goto unlock;
1114
1115         /* Tear down the existing obj:vma cache; it will have to be rebuilt. */
1116         lut_close(ctx);
1117
1118         old = __set_ppgtt(ctx, ppgtt);
1119
1120         /*
1121          * We need to flush any requests using the current ppgtt before
1122          * we release it as the requests do not hold a reference themselves,
1123          * only indirectly through the context.
1124          */
1125         err = context_barrier_task(ctx, ALL_ENGINES,
1126                                    emit_ppgtt_update,
1127                                    set_ppgtt_barrier,
1128                                    old);
1129         if (err) {
1130                 ctx->ppgtt = old;
1131                 ctx->desc_template = default_desc_template(ctx->i915, old);
1132                 i915_ppgtt_put(ppgtt);
1133         }
1134
1135 unlock:
1136         mutex_unlock(&ctx->i915->drm.struct_mutex);
1137
1138 out:
1139         i915_ppgtt_put(ppgtt);
1140         return err;
1141 }
1142
1143 static int gen8_emit_rpcs_config(struct i915_request *rq,
1144                                  struct intel_context *ce,
1145                                  struct intel_sseu sseu)
1146 {
1147         u64 offset;
1148         u32 *cs;
1149
1150         cs = intel_ring_begin(rq, 4);
1151         if (IS_ERR(cs))
1152                 return PTR_ERR(cs);
1153
1154         offset = i915_ggtt_offset(ce->state) +
1155                  LRC_STATE_PN * PAGE_SIZE +
1156                  (CTX_R_PWR_CLK_STATE + 1) * 4;
1157
1158         *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1159         *cs++ = lower_32_bits(offset);
1160         *cs++ = upper_32_bits(offset);
1161         *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1162
1163         intel_ring_advance(rq, cs);
1164
1165         return 0;
1166 }
1167
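/*
 * Reconfigure the slice/subslice/EU (RPCS) state of an already pinned
 * context by writing the new value into its saved context image from the
 * kernel context, ordered after all outstanding work on @ce's timeline.
 */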
1168 static int
1169 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1170 {
1171         struct i915_request *rq;
1172         int ret;
1173
1174         lockdep_assert_held(&ce->pin_mutex);
1175
1176         /*
1177          * If the context is not idle, we have to submit an ordered request to
1178          * modify its context image via the kernel context (writing to our own
1179          * image, or into the registers directly, does not stick). Pristine
1180          * and idle contexts will be configured on pinning.
1181          */
1182         if (!intel_context_is_pinned(ce))
1183                 return 0;
1184
1185         rq = i915_request_create(ce->engine->kernel_context);
1186         if (IS_ERR(rq))
1187                 return PTR_ERR(rq);
1188
1189         /* Queue this switch after all other activity by this context. */
1190         ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
1191         if (ret)
1192                 goto out_add;
1193
1194         ret = gen8_emit_rpcs_config(rq, ce, sseu);
1195         if (ret)
1196                 goto out_add;
1197
1198         /*
1199          * Guarantee context image and the timeline remains pinned until the
1200          * modifying request is retired by setting the ce activity tracker.
1201          *
1202          * But we only need to take one pin on account of it; in other words,
1203          * we transfer the pinned ce object to the tracked active request.
1204          */
1205         if (!i915_active_request_isset(&ce->active_tracker))
1206                 __intel_context_pin(ce);
1207         __i915_active_request_set(&ce->active_tracker, rq);
1208
1209 out_add:
1210         i915_request_add(rq);
1211         return ret;
1212 }
1213
1214 static int
1215 __intel_context_reconfigure_sseu(struct intel_context *ce,
1216                                  struct intel_sseu sseu)
1217 {
1218         int ret;
1219
1220         GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
1221         GEM_BUG_ON(ce->engine->id != RCS0);
1222
1223         ret = intel_context_lock_pinned(ce);
1224         if (ret)
1225                 return ret;
1226
1227         /* Nothing to do if unmodified. */
1228         if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1229                 goto unlock;
1230
1231         ret = gen8_modify_rpcs(ce, sseu);
1232         if (!ret)
1233                 ce->sseu = sseu;
1234
1235 unlock:
1236         intel_context_unlock_pinned(ce);
1237         return ret;
1238 }
1239
1240 static int
1241 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1242 {
1243         struct drm_i915_private *i915 = ce->gem_context->i915;
1244         int ret;
1245
1246         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1247         if (ret)
1248                 return ret;
1249
1250         ret = __intel_context_reconfigure_sseu(ce, sseu);
1251
1252         mutex_unlock(&i915->drm.struct_mutex);
1253
1254         return ret;
1255 }
1256
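/*
 * Validate the user-provided sseu configuration and convert it into the
 * driver's intel_sseu representation, rejecting combinations the hardware
 * (and, on gen11, the VME-only uAPI restrictions) cannot support.
 */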
1257 static int
1258 user_to_context_sseu(struct drm_i915_private *i915,
1259                      const struct drm_i915_gem_context_param_sseu *user,
1260                      struct intel_sseu *context)
1261 {
1262         const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1263
1264         /* No zeros in any field. */
1265         if (!user->slice_mask || !user->subslice_mask ||
1266             !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1267                 return -EINVAL;
1268
1269         /* Max >= min. */
1270         if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1271                 return -EINVAL;
1272
1273         /*
1274          * Some future proofing on the types since the uAPI is wider than the
1275          * current internal implementation.
1276          */
1277         if (overflows_type(user->slice_mask, context->slice_mask) ||
1278             overflows_type(user->subslice_mask, context->subslice_mask) ||
1279             overflows_type(user->min_eus_per_subslice,
1280                            context->min_eus_per_subslice) ||
1281             overflows_type(user->max_eus_per_subslice,
1282                            context->max_eus_per_subslice))
1283                 return -EINVAL;
1284
1285         /* Check validity against hardware. */
1286         if (user->slice_mask & ~device->slice_mask)
1287                 return -EINVAL;
1288
1289         if (user->subslice_mask & ~device->subslice_mask[0])
1290                 return -EINVAL;
1291
1292         if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1293                 return -EINVAL;
1294
1295         context->slice_mask = user->slice_mask;
1296         context->subslice_mask = user->subslice_mask;
1297         context->min_eus_per_subslice = user->min_eus_per_subslice;
1298         context->max_eus_per_subslice = user->max_eus_per_subslice;
1299
1300         /* Part specific restrictions. */
1301         if (IS_GEN(i915, 11)) {
1302                 unsigned int hw_s = hweight8(device->slice_mask);
1303                 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1304                 unsigned int req_s = hweight8(context->slice_mask);
1305                 unsigned int req_ss = hweight8(context->subslice_mask);
1306
1307                 /*
1308                  * Only full subslice enablement is possible if more than one
1309                  * slice is turned on.
1310                  */
1311                 if (req_s > 1 && req_ss != hw_ss_per_s)
1312                         return -EINVAL;
1313
1314                 /*
1315                  * If more than four (SScount bitfield limit) subslices are
1316                  * requested then the number has to be even.
1317                  */
1318                 if (req_ss > 4 && (req_ss & 1))
1319                         return -EINVAL;
1320
1321                 /*
1322                  * If only one slice is enabled and subslice count is below the
1323                  * device full enablement, it must be at most half of all the
1324                  * available subslices.
1325                  */
1326                 if (req_s == 1 && req_ss < hw_ss_per_s &&
1327                     req_ss > (hw_ss_per_s / 2))
1328                         return -EINVAL;
1329
1330                 /* ABI restriction - VME use case only. */
1331
1332                 /* All slices or one slice only. */
1333                 if (req_s != 1 && req_s != hw_s)
1334                         return -EINVAL;
1335
1336                 /*
1337                  * Half subslices or full enablement only when one slice is
1338                  * enabled.
1339                  */
1340                 if (req_s == 1 &&
1341                     (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1342                         return -EINVAL;
1343
1344                 /* No EU configuration changes. */
1345                 if ((user->min_eus_per_subslice !=
1346                      device->max_eus_per_subslice) ||
1347                     (user->max_eus_per_subslice !=
1348                      device->max_eus_per_subslice))
1349                         return -EINVAL;
1350         }
1351
1352         return 0;
1353 }
1354
1355 static int set_sseu(struct i915_gem_context *ctx,
1356                     struct drm_i915_gem_context_param *args)
1357 {
1358         struct drm_i915_private *i915 = ctx->i915;
1359         struct drm_i915_gem_context_param_sseu user_sseu;
1360         struct intel_context *ce;
1361         struct intel_sseu sseu;
1362         unsigned long lookup;
1363         int ret;
1364
1365         if (args->size < sizeof(user_sseu))
1366                 return -EINVAL;
1367
1368         if (!IS_GEN(i915, 11))
1369                 return -ENODEV;
1370
1371         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1372                            sizeof(user_sseu)))
1373                 return -EFAULT;
1374
1375         if (user_sseu.rsvd)
1376                 return -EINVAL;
1377
1378         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1379                 return -EINVAL;
1380
1381         lookup = 0;
1382         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1383                 lookup |= LOOKUP_USER_INDEX;
1384
1385         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1386         if (IS_ERR(ce))
1387                 return PTR_ERR(ce);
1388
1389         /* Only the render engine supports RPCS configuration. */
1390         if (ce->engine->class != RENDER_CLASS) {
1391                 ret = -ENODEV;
1392                 goto out_ce;
1393         }
1394
1395         ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1396         if (ret)
1397                 goto out_ce;
1398
1399         ret = intel_context_reconfigure_sseu(ce, sseu);
1400         if (ret)
1401                 goto out_ce;
1402
1403         args->size = sizeof(user_sseu);
1404
1405 out_ce:
1406         intel_context_put(ce);
1407         return ret;
1408 }
1409
1410 struct set_engines {
1411         struct i915_gem_context *ctx;
1412         struct i915_gem_engines *engines;
1413 };
1414
1415 static const i915_user_extension_fn set_engines__extensions[] = {
1416 };
1417
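/*
 * I915_CONTEXT_PARAM_ENGINES (setter): replace the context's engine map with
 * the array supplied by userspace. A zero-sized argument restores the legacy
 * per-ring map; otherwise each { class, instance } entry is resolved to an
 * intel_context, with the INVALID placeholder leaving a NULL hole.
 */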
1418 static int
1419 set_engines(struct i915_gem_context *ctx,
1420             const struct drm_i915_gem_context_param *args)
1421 {
1422         struct i915_context_param_engines __user *user =
1423                 u64_to_user_ptr(args->value);
1424         struct set_engines set = { .ctx = ctx };
1425         unsigned int num_engines, n;
1426         u64 extensions;
1427         int err;
1428
1429         if (!args->size) { /* switch back to legacy user_ring_map */
1430                 if (!i915_gem_context_user_engines(ctx))
1431                         return 0;
1432
1433                 set.engines = default_engines(ctx);
1434                 if (IS_ERR(set.engines))
1435                         return PTR_ERR(set.engines);
1436
1437                 goto replace;
1438         }
1439
1440         BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1441         if (args->size < sizeof(*user) ||
1442             !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1443                 DRM_DEBUG("Invalid size for engine array: %d\n",
1444                           args->size);
1445                 return -EINVAL;
1446         }
1447
1448         /*
1449          * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1450          * first 64 engines defined here.
1451          */
1452         num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1453
1454         set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1455                               GFP_KERNEL);
1456         if (!set.engines)
1457                 return -ENOMEM;
1458
1459         set.engines->i915 = ctx->i915;
1460         for (n = 0; n < num_engines; n++) {
1461                 struct i915_engine_class_instance ci;
1462                 struct intel_engine_cs *engine;
1463
1464                 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1465                         __free_engines(set.engines, n);
1466                         return -EFAULT;
1467                 }
1468
1469                 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1470                     ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1471                         set.engines->engines[n] = NULL;
1472                         continue;
1473                 }
1474
1475                 engine = intel_engine_lookup_user(ctx->i915,
1476                                                   ci.engine_class,
1477                                                   ci.engine_instance);
1478                 if (!engine) {
1479                         DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1480                                   n, ci.engine_class, ci.engine_instance);
1481                         __free_engines(set.engines, n);
1482                         return -ENOENT;
1483                 }
1484
1485                 set.engines->engines[n] = intel_context_create(ctx, engine);
1486                 if (IS_ERR(set.engines->engines[n])) {
1487                         __free_engines(set.engines, n);
1488                         return -ENOMEM;
1489                 }
1490         }
1491         set.engines->num_engines = num_engines;
1492
1493         err = -EFAULT;
1494         if (!get_user(extensions, &user->extensions))
1495                 err = i915_user_extensions(u64_to_user_ptr(extensions),
1496                                            set_engines__extensions,
1497                                            ARRAY_SIZE(set_engines__extensions),
1498                                            &set);
1499         if (err) {
1500                 free_engines(set.engines);
1501                 return err;
1502         }
1503
1504 replace:
1505         mutex_lock(&ctx->engines_mutex);
1506         if (args->size)
1507                 i915_gem_context_set_user_engines(ctx);
1508         else
1509                 i915_gem_context_clear_user_engines(ctx);
1510         rcu_swap_protected(ctx->engines, set.engines, 1);
1511         mutex_unlock(&ctx->engines_mutex);
1512
1513         INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu);
1514         queue_rcu_work(system_wq, &set.engines->rcu);
1515
1516         return 0;
1517 }
1518
1519 static struct i915_gem_engines *
1520 __copy_engines(struct i915_gem_engines *e)
1521 {
1522         struct i915_gem_engines *copy;
1523         unsigned int n;
1524
1525         copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1526         if (!copy)
1527                 return ERR_PTR(-ENOMEM);
1528
1529         copy->i915 = e->i915;
1530         for (n = 0; n < e->num_engines; n++) {
1531                 if (e->engines[n])
1532                         copy->engines[n] = intel_context_get(e->engines[n]);
1533                 else
1534                         copy->engines[n] = NULL;
1535         }
1536         copy->num_engines = n;
1537
1538         return copy;
1539 }
1540
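/*
 * I915_CONTEXT_PARAM_ENGINES (getter): report the user-supplied engine map
 * back to userspace, or a zero size if the context is still using the
 * default map.
 */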
1541 static int
1542 get_engines(struct i915_gem_context *ctx,
1543             struct drm_i915_gem_context_param *args)
1544 {
1545         struct i915_context_param_engines __user *user;
1546         struct i915_gem_engines *e;
1547         size_t n, count, size;
1548         int err = 0;
1549
1550         err = mutex_lock_interruptible(&ctx->engines_mutex);
1551         if (err)
1552                 return err;
1553
1554         e = NULL;
1555         if (i915_gem_context_user_engines(ctx))
1556                 e = __copy_engines(i915_gem_context_engines(ctx));
1557         mutex_unlock(&ctx->engines_mutex);
1558         if (IS_ERR_OR_NULL(e)) {
1559                 args->size = 0;
1560                 return PTR_ERR_OR_ZERO(e);
1561         }
1562
1563         count = e->num_engines;
1564
1565         /* Be paranoid in case we have an impedance mismatch */
1566         if (!check_struct_size(user, engines, count, &size)) {
1567                 err = -EINVAL;
1568                 goto err_free;
1569         }
1570         if (overflows_type(size, args->size)) {
1571                 err = -EINVAL;
1572                 goto err_free;
1573         }
1574
1575         if (!args->size) {
1576                 args->size = size;
1577                 goto err_free;
1578         }
1579
1580         if (args->size < size) {
1581                 err = -EINVAL;
1582                 goto err_free;
1583         }
1584
1585         user = u64_to_user_ptr(args->value);
1586         if (!access_ok(user, size)) {
1587                 err = -EFAULT;
1588                 goto err_free;
1589         }
1590
1591         if (put_user(0, &user->extensions)) {
1592                 err = -EFAULT;
1593                 goto err_free;
1594         }
1595
1596         for (n = 0; n < count; n++) {
1597                 struct i915_engine_class_instance ci = {
1598                         .engine_class = I915_ENGINE_CLASS_INVALID,
1599                         .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1600                 };
1601
1602                 if (e->engines[n]) {
1603                         ci.engine_class = e->engines[n]->engine->uabi_class;
1604                         ci.engine_instance = e->engines[n]->engine->instance;
1605                 }
1606
1607                 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1608                         err = -EFAULT;
1609                         goto err_free;
1610                 }
1611         }
1612
1613         args->size = size;
1614
1615 err_free:
1616         INIT_RCU_WORK(&e->rcu, free_engines_rcu);
1617         queue_rcu_work(system_wq, &e->rcu);
1618         return err;
1619 }
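
/*
 * The matching query is a two-pass GETPARAM (sketch only; struct layout
 * per include/uapi/drm/i915_drm.h).  A zero-sized call reports the number
 * of bytes needed (0 if the context still uses the default map); a second
 * call with a large enough buffer fills in the class:instance pairs:
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	// arg.size is now the required buffer size in bytes
 *	if (arg.size) {
 *		arg.value = (__u64)(uintptr_t)malloc(arg.size);
 *		ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	}
 */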
1620
1621 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1622                         struct i915_gem_context *ctx,
1623                         struct drm_i915_gem_context_param *args)
1624 {
1625         int ret = 0;
1626
1627         switch (args->param) {
1628         case I915_CONTEXT_PARAM_NO_ZEROMAP:
1629                 if (args->size)
1630                         ret = -EINVAL;
1631                 else if (args->value)
1632                         set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1633                 else
1634                         clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1635                 break;
1636
1637         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1638                 if (args->size)
1639                         ret = -EINVAL;
1640                 else if (args->value)
1641                         i915_gem_context_set_no_error_capture(ctx);
1642                 else
1643                         i915_gem_context_clear_no_error_capture(ctx);
1644                 break;
1645
1646         case I915_CONTEXT_PARAM_BANNABLE:
1647                 if (args->size)
1648                         ret = -EINVAL;
1649                 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1650                         ret = -EPERM;
1651                 else if (args->value)
1652                         i915_gem_context_set_bannable(ctx);
1653                 else
1654                         i915_gem_context_clear_bannable(ctx);
1655                 break;
1656
1657         case I915_CONTEXT_PARAM_RECOVERABLE:
1658                 if (args->size)
1659                         ret = -EINVAL;
1660                 else if (args->value)
1661                         i915_gem_context_set_recoverable(ctx);
1662                 else
1663                         i915_gem_context_clear_recoverable(ctx);
1664                 break;
1665
1666         case I915_CONTEXT_PARAM_PRIORITY:
1667                 {
1668                         s64 priority = args->value;
1669
1670                         if (args->size)
1671                                 ret = -EINVAL;
1672                         else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1673                                 ret = -ENODEV;
1674                         else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1675                                  priority < I915_CONTEXT_MIN_USER_PRIORITY)
1676                                 ret = -EINVAL;
1677                         else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1678                                  !capable(CAP_SYS_NICE))
1679                                 ret = -EPERM;
1680                         else
1681                                 ctx->sched.priority =
1682                                         I915_USER_PRIORITY(priority);
1683                 }
1684                 break;
1685
1686         case I915_CONTEXT_PARAM_SSEU:
1687                 ret = set_sseu(ctx, args);
1688                 break;
1689
1690         case I915_CONTEXT_PARAM_VM:
1691                 ret = set_ppgtt(fpriv, ctx, args);
1692                 break;
1693
1694         case I915_CONTEXT_PARAM_ENGINES:
1695                 ret = set_engines(ctx, args);
1696                 break;
1697
1698         case I915_CONTEXT_PARAM_BAN_PERIOD:
1699         default:
1700                 ret = -EINVAL;
1701                 break;
1702         }
1703
1704         return ret;
1705 }
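
/*
 * ctx_setparam() is shared by the SETPARAM ioctl and by the create-time
 * SETPARAM extension below.  A minimal userspace sketch (illustrative
 * only) raising the scheduling priority of an existing context; values
 * above I915_CONTEXT_DEFAULT_PRIORITY require CAP_SYS_NICE, and size must
 * be left at 0 for scalar parameters:
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512,	// within [MIN, MAX]_USER_PRIORITY
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */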
1706
1707 struct create_ext {
1708         struct i915_gem_context *ctx;
1709         struct drm_i915_file_private *fpriv;
1710 };
1711
1712 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1713 {
1714         struct drm_i915_gem_context_create_ext_setparam local;
1715         const struct create_ext *arg = data;
1716
1717         if (copy_from_user(&local, ext, sizeof(local)))
1718                 return -EFAULT;
1719
1720         if (local.param.ctx_id)
1721                 return -EINVAL;
1722
1723         return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1724 }
1725
1726 static int clone_engines(struct i915_gem_context *dst,
1727                          struct i915_gem_context *src)
1728 {
1729         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1730         struct i915_gem_engines *clone;
1731         bool user_engines;
1732         unsigned long n;
1733
1734         clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1735         if (!clone)
1736                 goto err_unlock;
1737
1738         clone->i915 = dst->i915;
1739         for (n = 0; n < e->num_engines; n++) {
1740                 if (!e->engines[n]) {
1741                         clone->engines[n] = NULL;
1742                         continue;
1743                 }
1744
1745                 clone->engines[n] =
1746                         intel_context_create(dst, e->engines[n]->engine);
1747                 if (!clone->engines[n]) {
1748                         __free_engines(clone, n);
1749                         goto err_unlock;
1750                 }
1751         }
1752         clone->num_engines = n;
1753
1754         user_engines = i915_gem_context_user_engines(src);
1755         i915_gem_context_unlock_engines(src);
1756
1757         free_engines(dst->engines);
1758         RCU_INIT_POINTER(dst->engines, clone);
1759         if (user_engines)
1760                 i915_gem_context_set_user_engines(dst);
1761         else
1762                 i915_gem_context_clear_user_engines(dst);
1763         return 0;
1764
1765 err_unlock:
1766         i915_gem_context_unlock_engines(src);
1767         return -ENOMEM;
1768 }
1769
1770 static int clone_flags(struct i915_gem_context *dst,
1771                        struct i915_gem_context *src)
1772 {
1773         dst->user_flags = src->user_flags;
1774         return 0;
1775 }
1776
1777 static int clone_schedattr(struct i915_gem_context *dst,
1778                            struct i915_gem_context *src)
1779 {
1780         dst->sched = src->sched;
1781         return 0;
1782 }
1783
1784 static int clone_sseu(struct i915_gem_context *dst,
1785                       struct i915_gem_context *src)
1786 {
1787         struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1788         struct i915_gem_engines *clone;
1789         unsigned long n;
1790         int err;
1791
1792         clone = dst->engines; /* no locking required; sole access */
1793         if (e->num_engines != clone->num_engines) {
1794                 err = -EINVAL;
1795                 goto unlock;
1796         }
1797
1798         for (n = 0; n < e->num_engines; n++) {
1799                 struct intel_context *ce = e->engines[n];
1800
1801                 if (clone->engines[n]->engine->class != ce->engine->class) {
1802                         /* Must have compatible engine maps! */
1803                         err = -EINVAL;
1804                         goto unlock;
1805                 }
1806
1807                 /* serialises with set_sseu */
1808                 err = intel_context_lock_pinned(ce);
1809                 if (err)
1810                         goto unlock;
1811
1812                 clone->engines[n]->sseu = ce->sseu;
1813                 intel_context_unlock_pinned(ce);
1814         }
1815
1816         err = 0;
1817 unlock:
1818         i915_gem_context_unlock_engines(src);
1819         return err;
1820 }
1821
1822 static int clone_timeline(struct i915_gem_context *dst,
1823                           struct i915_gem_context *src)
1824 {
1825         if (src->timeline) {
1826                 GEM_BUG_ON(src->timeline == dst->timeline);
1827
1828                 if (dst->timeline)
1829                         i915_timeline_put(dst->timeline);
1830                 dst->timeline = i915_timeline_get(src->timeline);
1831         }
1832
1833         return 0;
1834 }
1835
1836 static int clone_vm(struct i915_gem_context *dst,
1837                     struct i915_gem_context *src)
1838 {
1839         struct i915_hw_ppgtt *ppgtt;
1840
1841         rcu_read_lock();
1842         do {
1843                 ppgtt = READ_ONCE(src->ppgtt);
1844                 if (!ppgtt)
1845                         break;
1846
1847                 if (!kref_get_unless_zero(&ppgtt->ref))
1848                         continue;
1849
1850                 /*
1851                  * This ppgtt may have been reallocated between
1852                  * the read and the kref, and reassigned to a third
1853                  * context. In order to avoid inadvertent sharing
1854                  * of this ppgtt with that third context (and not
1855                  * src), we have to confirm that we have the same
1856                  * ppgtt after passing through the strong memory
1857                  * barrier implied by a successful
1858                  * kref_get_unless_zero().
1859                  *
1860                  * Once we have acquired the current ppgtt of src,
1861                  * we no longer care if it is released from src, as
1862                  * it cannot be reallocated elsewhere.
1863                  */
1864
1865                 if (ppgtt == READ_ONCE(src->ppgtt))
1866                         break;
1867
1868                 i915_ppgtt_put(ppgtt);
1869         } while (1);
1870         rcu_read_unlock();
1871
1872         if (ppgtt) {
1873                 __assign_ppgtt(dst, ppgtt);
1874                 i915_ppgtt_put(ppgtt);
1875         }
1876
1877         return 0;
1878 }
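
/*
 * The loop above is an instance of a general pattern for taking a
 * reference on an RCU-published, kref-counted pointer that may be swapped
 * out and freed concurrently.  A minimal sketch with placeholder names
 * ('slot', 'ref' and 'release' are not driver symbols):
 *
 *	rcu_read_lock();
 *	do {
 *		obj = READ_ONCE(slot);
 *		if (!obj)
 *			break;
 *		if (!kref_get_unless_zero(&obj->ref))
 *			continue;
 *		if (obj == READ_ONCE(slot))
 *			break;			// still published, keep the ref
 *		kref_put(&obj->ref, release);	// raced, drop and retry
 *	} while (1);
 *	rcu_read_unlock();
 */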
1879
1880 static int create_clone(struct i915_user_extension __user *ext, void *data)
1881 {
1882         static int (* const fn[])(struct i915_gem_context *dst,
1883                                   struct i915_gem_context *src) = {
1884 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
1885                 MAP(ENGINES, clone_engines),
1886                 MAP(FLAGS, clone_flags),
1887                 MAP(SCHEDATTR, clone_schedattr),
1888                 MAP(SSEU, clone_sseu),
1889                 MAP(TIMELINE, clone_timeline),
1890                 MAP(VM, clone_vm),
1891 #undef MAP
1892         };
1893         struct drm_i915_gem_context_create_ext_clone local;
1894         const struct create_ext *arg = data;
1895         struct i915_gem_context *dst = arg->ctx;
1896         struct i915_gem_context *src;
1897         int err, bit;
1898
1899         if (copy_from_user(&local, ext, sizeof(local)))
1900                 return -EFAULT;
1901
1902         BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
1903                      I915_CONTEXT_CLONE_UNKNOWN);
1904
1905         if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
1906                 return -EINVAL;
1907
1908         if (local.rsvd)
1909                 return -EINVAL;
1910
1911         rcu_read_lock();
1912         src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
1913         rcu_read_unlock();
1914         if (!src)
1915                 return -ENOENT;
1916
1917         GEM_BUG_ON(src == dst);
1918
1919         for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
1920                 if (!(local.flags & BIT(bit)))
1921                         continue;
1922
1923                 err = fn[bit](dst, src);
1924                 if (err)
1925                         return err;
1926         }
1927
1928         return 0;
1929 }
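
/*
 * Illustrative clone request from userspace (a sketch; field names per
 * include/uapi/drm/i915_drm.h).  The extension is chained into the
 * CONTEXT_CREATE_EXT ioctl shown further below:
 *
 *	struct drm_i915_gem_context_create_ext_clone clone = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
 *		.clone_id = src_ctx_id,
 *		.flags = I915_CONTEXT_CLONE_ENGINES |
 *			 I915_CONTEXT_CLONE_VM |
 *			 I915_CONTEXT_CLONE_SCHEDATTR,
 *	};
 *
 * Any bit outside the MAP() table above is rejected as
 * I915_CONTEXT_CLONE_UNKNOWN, and rsvd must be zero.
 */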
1930
1931 static const i915_user_extension_fn create_extensions[] = {
1932         [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
1933         [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
1934 };
1935
1936 static bool client_is_banned(struct drm_i915_file_private *file_priv)
1937 {
1938         return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
1939 }
1940
1941 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1942                                   struct drm_file *file)
1943 {
1944         struct drm_i915_private *i915 = to_i915(dev);
1945         struct drm_i915_gem_context_create_ext *args = data;
1946         struct create_ext ext_data;
1947         int ret;
1948
1949         if (!DRIVER_CAPS(i915)->has_logical_contexts)
1950                 return -ENODEV;
1951
1952         if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
1953                 return -EINVAL;
1954
1955         ret = i915_terminally_wedged(i915);
1956         if (ret)
1957                 return ret;
1958
1959         ext_data.fpriv = file->driver_priv;
1960         if (client_is_banned(ext_data.fpriv)) {
1961                 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
1962                           current->comm,
1963                           pid_nr(get_task_pid(current, PIDTYPE_PID)));
1964                 return -EIO;
1965         }
1966
1967         ret = i915_mutex_lock_interruptible(dev);
1968         if (ret)
1969                 return ret;
1970
1971         ext_data.ctx = i915_gem_create_context(i915, args->flags);
1972         mutex_unlock(&dev->struct_mutex);
1973         if (IS_ERR(ext_data.ctx))
1974                 return PTR_ERR(ext_data.ctx);
1975
1976         if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
1977                 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
1978                                            create_extensions,
1979                                            ARRAY_SIZE(create_extensions),
1980                                            &ext_data);
1981                 if (ret)
1982                         goto err_ctx;
1983         }
1984
1985         ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
1986         if (ret < 0)
1987                 goto err_ctx;
1988
1989         args->ctx_id = ret;
1990         DRM_DEBUG("HW context %d created\n", args->ctx_id);
1991
1992         return 0;
1993
1994 err_ctx:
1995         mutex_lock(&dev->struct_mutex);
1996         context_close(ext_data.ctx);
1997         mutex_unlock(&dev->struct_mutex);
1998         return ret;
1999 }
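
/*
 * End-to-end creation from userspace (a sketch; exact uapi layout per
 * include/uapi/drm/i915_drm.h).  Extensions form a chained list consumed
 * by create_extensions[] above; here a single SETPARAM extension marks
 * the new context as non-recoverable at creation time:
 *
 *	struct drm_i915_gem_context_create_ext_setparam p = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)&p,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// on success, create.ctx_id names the new context
 */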
2000
2001 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2002                                    struct drm_file *file)
2003 {
2004         struct drm_i915_gem_context_destroy *args = data;
2005         struct drm_i915_file_private *file_priv = file->driver_priv;
2006         struct i915_gem_context *ctx;
2007
2008         if (args->pad != 0)
2009                 return -EINVAL;
2010
2011         if (!args->ctx_id)
2012                 return -ENOENT;
2013
2014         if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2015                 return -EINTR;
2016
2017         ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2018         mutex_unlock(&file_priv->context_idr_lock);
2019         if (!ctx)
2020                 return -ENOENT;
2021
2022         mutex_lock(&dev->struct_mutex);
2023         context_close(ctx);
2024         mutex_unlock(&dev->struct_mutex);
2025
2026         return 0;
2027 }
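
/*
 * Matching teardown from userspace (illustrative):
 *
 *	struct drm_i915_gem_context_destroy destroy = { .ctx_id = ctx_id };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *
 * ctx_id 0 (the default context) cannot be destroyed and is reported as
 * -ENOENT above.
 */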
2028
2029 static int get_sseu(struct i915_gem_context *ctx,
2030                     struct drm_i915_gem_context_param *args)
2031 {
2032         struct drm_i915_gem_context_param_sseu user_sseu;
2033         struct intel_context *ce;
2034         unsigned long lookup;
2035         int err;
2036
2037         if (args->size == 0)
2038                 goto out;
2039         else if (args->size < sizeof(user_sseu))
2040                 return -EINVAL;
2041
2042         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2043                            sizeof(user_sseu)))
2044                 return -EFAULT;
2045
2046         if (user_sseu.rsvd)
2047                 return -EINVAL;
2048
2049         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2050                 return -EINVAL;
2051
2052         lookup = 0;
2053         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2054                 lookup |= LOOKUP_USER_INDEX;
2055
2056         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2057         if (IS_ERR(ce))
2058                 return PTR_ERR(ce);
2059
2060         err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2061         if (err) {
2062                 intel_context_put(ce);
2063                 return err;
2064         }
2065
2066         user_sseu.slice_mask = ce->sseu.slice_mask;
2067         user_sseu.subslice_mask = ce->sseu.subslice_mask;
2068         user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2069         user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2070
2071         intel_context_unlock_pinned(ce);
2072         intel_context_put(ce);
2073
2074         if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2075                          sizeof(user_sseu)))
2076                 return -EFAULT;
2077
2078 out:
2079         args->size = sizeof(user_sseu);
2080
2081         return 0;
2082 }
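
/*
 * Querying the current SSEU configuration of one engine from userspace
 * (a sketch; struct drm_i915_gem_context_param_sseu per the uapi header):
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (__u64)(uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	// sseu.slice_mask, sseu.subslice_mask and
 *	// sseu.{min,max}_eus_per_subslice now reflect the context's state
 */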
2083
2084 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2085                                     struct drm_file *file)
2086 {
2087         struct drm_i915_file_private *file_priv = file->driver_priv;
2088         struct drm_i915_gem_context_param *args = data;
2089         struct i915_gem_context *ctx;
2090         int ret = 0;
2091
2092         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2093         if (!ctx)
2094                 return -ENOENT;
2095
2096         switch (args->param) {
2097         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2098                 args->size = 0;
2099                 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2100                 break;
2101
2102         case I915_CONTEXT_PARAM_GTT_SIZE:
2103                 args->size = 0;
2104                 if (ctx->ppgtt)
2105                         args->value = ctx->ppgtt->vm.total;
2106                 else if (to_i915(dev)->mm.aliasing_ppgtt)
2107                         args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
2108                 else
2109                         args->value = to_i915(dev)->ggtt.vm.total;
2110                 break;
2111
2112         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2113                 args->size = 0;
2114                 args->value = i915_gem_context_no_error_capture(ctx);
2115                 break;
2116
2117         case I915_CONTEXT_PARAM_BANNABLE:
2118                 args->size = 0;
2119                 args->value = i915_gem_context_is_bannable(ctx);
2120                 break;
2121
2122         case I915_CONTEXT_PARAM_RECOVERABLE:
2123                 args->size = 0;
2124                 args->value = i915_gem_context_is_recoverable(ctx);
2125                 break;
2126
2127         case I915_CONTEXT_PARAM_PRIORITY:
2128                 args->size = 0;
2129                 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2130                 break;
2131
2132         case I915_CONTEXT_PARAM_SSEU:
2133                 ret = get_sseu(ctx, args);
2134                 break;
2135
2136         case I915_CONTEXT_PARAM_VM:
2137                 ret = get_ppgtt(file_priv, ctx, args);
2138                 break;
2139
2140         case I915_CONTEXT_PARAM_ENGINES:
2141                 ret = get_engines(ctx, args);
2142                 break;
2143
2144         case I915_CONTEXT_PARAM_BAN_PERIOD:
2145         default:
2146                 ret = -EINVAL;
2147                 break;
2148         }
2149
2150         i915_gem_context_put(ctx);
2151         return ret;
2152 }
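
/*
 * Scalar parameters are returned directly in .value with .size cleared,
 * e.g. reading the address space size available to a context
 * (illustrative):
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	// arg.value is the usable GTT/ppGTT size in bytes
 */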
2153
2154 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2155                                     struct drm_file *file)
2156 {
2157         struct drm_i915_file_private *file_priv = file->driver_priv;
2158         struct drm_i915_gem_context_param *args = data;
2159         struct i915_gem_context *ctx;
2160         int ret;
2161
2162         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2163         if (!ctx)
2164                 return -ENOENT;
2165
2166         ret = ctx_setparam(file_priv, ctx, args);
2167
2168         i915_gem_context_put(ctx);
2169         return ret;
2170 }
2171
2172 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2173                                        void *data, struct drm_file *file)
2174 {
2175         struct drm_i915_private *dev_priv = to_i915(dev);
2176         struct drm_i915_reset_stats *args = data;
2177         struct i915_gem_context *ctx;
2178         int ret;
2179
2180         if (args->flags || args->pad)
2181                 return -EINVAL;
2182
2183         ret = -ENOENT;
2184         rcu_read_lock();
2185         ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2186         if (!ctx)
2187                 goto out;
2188
2189         /*
2190          * We opt for unserialised reads here. This may result in tearing
2191          * in the extremely unlikely event of a GPU hang on this context
2192          * while we are querying its stats. If we need that extra layer of protection,
2193          * we should wrap the hangstats with a seqlock.
2194          */
2195
2196         if (capable(CAP_SYS_ADMIN))
2197                 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2198         else
2199                 args->reset_count = 0;
2200
2201         args->batch_active = atomic_read(&ctx->guilty_count);
2202         args->batch_pending = atomic_read(&ctx->active_count);
2203
2204         ret = 0;
2205 out:
2206         rcu_read_unlock();
2207         return ret;
2208 }
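
/*
 * Userspace polls hang statistics for a context like so (illustrative
 * sketch; struct drm_i915_reset_stats per the uapi header):
 *
 *	struct drm_i915_reset_stats rs = { .ctx_id = ctx_id };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &rs);
 *	// rs.batch_active:  hangs in which this context was guilty
 *	// rs.batch_pending: resets this context was otherwise affected by
 *	// rs.reset_count:   global reset count, 0 unless CAP_SYS_ADMIN
 */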
2209
2210 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2211 {
2212         struct drm_i915_private *i915 = ctx->i915;
2213         int err = 0;
2214
2215         mutex_lock(&i915->contexts.mutex);
2216
2217         GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2218
2219         if (list_empty(&ctx->hw_id_link)) {
2220                 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2221
2222                 err = assign_hw_id(i915, &ctx->hw_id);
2223                 if (err)
2224                         goto out_unlock;
2225
2226                 list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2227         }
2228
2229         GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2230         atomic_inc(&ctx->hw_id_pin_count);
2231
2232 out_unlock:
2233         mutex_unlock(&i915->contexts.mutex);
2234         return err;
2235 }
2236
2237 /* GEM context-engines iterator: for_each_gem_engine() */
2238 struct intel_context *
2239 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2240 {
2241         const struct i915_gem_engines *e = it->engines;
2242         struct intel_context *ctx;
2243
2244         do {
2245                 if (it->idx >= e->num_engines)
2246                         return NULL;
2247
2248                 ctx = e->engines[it->idx++];
2249         } while (!ctx);
2250
2251         return ctx;
2252 }
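
/*
 * A minimal in-kernel usage sketch of the iterator; for_each_gem_engine()
 * and the lock/unlock helpers are declared in i915_gem_context.h (the
 * macro's argument order is assumed from that header):
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		// ce visits each populated engine slot, skipping the NULL
 *		// holes exactly as the loop above does
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */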
2253
2254 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2255 #include "selftests/mock_context.c"
2256 #include "selftests/i915_gem_context.c"
2257 #endif
2258
2259 static void i915_global_gem_context_shrink(void)
2260 {
2261         kmem_cache_shrink(global.slab_luts);
2262 }
2263
2264 static void i915_global_gem_context_exit(void)
2265 {
2266         kmem_cache_destroy(global.slab_luts);
2267 }
2268
2269 static struct i915_global_gem_context global = { {
2270         .shrink = i915_global_gem_context_shrink,
2271         .exit = i915_global_gem_context_exit,
2272 } };
2273
2274 int __init i915_global_gem_context_init(void)
2275 {
2276         global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2277         if (!global.slab_luts)
2278                 return -ENOMEM;
2279
2280         i915_global_register(&global.base);
2281         return 0;
2282 }