1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6
7 #include <linux/prime_numbers.h>
8
9 #include "gem/i915_gem_pm.h"
10 #include "gt/intel_reset.h"
11
12 #include "i915_selftest.h"
13 #include "selftests/i915_random.h"
14 #include "selftests/igt_flush_test.h"
15 #include "selftests/igt_live_test.h"
16 #include "selftests/igt_spinner.h"
17 #include "selftests/lib_sw_fence.h"
18
19 #include "gem/selftests/igt_gem_utils.h"
20 #include "gem/selftests/mock_context.h"
21
22 static int live_sanitycheck(void *arg)
23 {
24         struct drm_i915_private *i915 = arg;
25         struct i915_gem_engines_iter it;
26         struct i915_gem_context *ctx;
27         struct intel_context *ce;
28         struct igt_spinner spin;
29         intel_wakeref_t wakeref;
30         int err = -ENOMEM;
31
32         if (!HAS_LOGICAL_RING_CONTEXTS(i915))
33                 return 0;
34
35         mutex_lock(&i915->drm.struct_mutex);
36         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
37
38         if (igt_spinner_init(&spin, &i915->gt))
39                 goto err_unlock;
40
41         ctx = kernel_context(i915);
42         if (!ctx)
43                 goto err_spin;
44
45         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
46                 struct i915_request *rq;
47
48                 rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
49                 if (IS_ERR(rq)) {
50                         err = PTR_ERR(rq);
51                         goto err_ctx;
52                 }
53
54                 i915_request_add(rq);
55                 if (!igt_wait_for_spinner(&spin, rq)) {
56                         GEM_TRACE("spinner failed to start\n");
57                         GEM_TRACE_DUMP();
58                         intel_gt_set_wedged(&i915->gt);
59                         err = -EIO;
60                         goto err_ctx;
61                 }
62
63                 igt_spinner_end(&spin);
64                 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
65                         err = -EIO;
66                         goto err_ctx;
67                 }
68         }
69
70         err = 0;
71 err_ctx:
72         i915_gem_context_unlock_engines(ctx);
73         kernel_context_close(ctx);
74 err_spin:
75         igt_spinner_fini(&spin);
76 err_unlock:
77         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
78         mutex_unlock(&i915->drm.struct_mutex);
79         return err;
80 }
81
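/*
 * Emit one link of a semaphore chain: with arbitration enabled, poll
 * dword[idx] of @vma until it becomes non-zero, then (for idx > 0)
 * write 1 into dword[idx - 1] to release the previous link. The head of
 * the chain (idx == 0) only waits. Arbitration is disabled again before
 * the request ends.
 */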
82 static int
83 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
84 {
85         u32 *cs;
86
87         cs = intel_ring_begin(rq, 10);
88         if (IS_ERR(cs))
89                 return PTR_ERR(cs);
90
91         *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
92
93         *cs++ = MI_SEMAPHORE_WAIT |
94                 MI_SEMAPHORE_GLOBAL_GTT |
95                 MI_SEMAPHORE_POLL |
96                 MI_SEMAPHORE_SAD_NEQ_SDD;
97         *cs++ = 0;
98         *cs++ = i915_ggtt_offset(vma) + 4 * idx;
99         *cs++ = 0;
100
101         if (idx > 0) {
102                 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
103                 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
104                 *cs++ = 0;
105                 *cs++ = 1;
106         } else {
107                 *cs++ = MI_NOOP;
108                 *cs++ = MI_NOOP;
109                 *cs++ = MI_NOOP;
110                 *cs++ = MI_NOOP;
111         }
112
113         *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
114
115         intel_ring_advance(rq, cs);
116         return 0;
117 }
118
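/*
 * Queue one link of the chain on @engine using a throwaway kernel
 * context. The request is added even if emitting the link failed, so it
 * is always tracked and can be retired.
 */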
119 static struct i915_request *
120 semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
121 {
122         struct i915_gem_context *ctx;
123         struct i915_request *rq;
124         int err;
125
126         ctx = kernel_context(engine->i915);
127         if (!ctx)
128                 return ERR_PTR(-ENOMEM);
129
130         rq = igt_request_alloc(ctx, engine);
131         if (IS_ERR(rq))
132                 goto out_ctx;
133
134         err = emit_semaphore_chain(rq, vma, idx);
135         i915_request_add(rq);
136         if (err)
137                 rq = ERR_PTR(err);
138
139 out_ctx:
140         kernel_context_close(ctx);
141         return rq;
142 }
143
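/*
 * Kick the chain: submit a maximum priority request from the engine's
 * kernel context that writes 1 into dword[idx - 1], releasing the most
 * recently queued link so the chain can unwind back towards the head.
 */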
144 static int
145 release_queue(struct intel_engine_cs *engine,
146               struct i915_vma *vma,
147               int idx)
148 {
149         struct i915_sched_attr attr = {
150                 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
151         };
152         struct i915_request *rq;
153         u32 *cs;
154
155         rq = i915_request_create(engine->kernel_context);
156         if (IS_ERR(rq))
157                 return PTR_ERR(rq);
158
159         cs = intel_ring_begin(rq, 4);
160         if (IS_ERR(cs)) {
161                 i915_request_add(rq);
162                 return PTR_ERR(cs);
163         }
164
165         *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
166         *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
167         *cs++ = 0;
168         *cs++ = 1;
169
170         intel_ring_advance(rq, cs);
171         i915_request_add(rq);
172
173         engine->schedule(rq, &attr);
174
175         return 0;
176 }
177
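/*
 * Queue the head of the chain on @outer, then @count further links on
 * every engine, and finally release the tail at maximum priority. Every
 * link busy-waits with arbitration enabled, so the head can only
 * complete if the scheduler timeslices between the equal priority
 * semaphore waiters rather than running them strictly in submission
 * order.
 */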
178 static int
179 slice_semaphore_queue(struct intel_engine_cs *outer,
180                       struct i915_vma *vma,
181                       int count)
182 {
183         struct intel_engine_cs *engine;
184         struct i915_request *head;
185         enum intel_engine_id id;
186         int err, i, n = 0;
187
188         head = semaphore_queue(outer, vma, n++);
189         if (IS_ERR(head))
190                 return PTR_ERR(head);
191
192         i915_request_get(head);
193         for_each_engine(engine, outer->i915, id) {
194                 for (i = 0; i < count; i++) {
195                         struct i915_request *rq;
196
197                         rq = semaphore_queue(engine, vma, n++);
198                         if (IS_ERR(rq)) {
199                                 err = PTR_ERR(rq);
200                                 goto out;
201                         }
202                 }
203         }
204
205         err = release_queue(outer, vma, n);
206         if (err)
207                 goto out;
208
209         if (i915_request_wait(head,
210                               I915_WAIT_LOCKED,
211                               2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
212                 pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
213                        count, n);
214                 GEM_TRACE_DUMP();
215                 intel_gt_set_wedged(outer->gt);
216                 err = -EIO;
217         }
218
219 out:
220         i915_request_put(head);
221         return err;
222 }
223
224 static int live_timeslice_preempt(void *arg)
225 {
226         struct drm_i915_private *i915 = arg;
227         struct drm_i915_gem_object *obj;
228         intel_wakeref_t wakeref;
229         struct i915_vma *vma;
230         void *vaddr;
231         int err = 0;
232         int count;
233
234         /*
235          * If a request takes too long, we would like to give other users
236          * a fair go on the GPU. In particular, users may create batches
237          * that wait upon external input, where that input may even be
238          * supplied by another GPU job. To avoid blocking forever, we
239          * need to preempt the current task and replace it with another
240          * ready task.
241          */
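        /*
         * We model that scenario with slice_semaphore_queue(): chains of
         * semaphore busy-waits whose forward progress depends on the
         * scheduler slicing between the waiting requests.
         */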
242
243         mutex_lock(&i915->drm.struct_mutex);
244         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
245
246         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
247         if (IS_ERR(obj)) {
248                 err = PTR_ERR(obj);
249                 goto err_unlock;
250         }
251
252         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
253         if (IS_ERR(vma)) {
254                 err = PTR_ERR(vma);
255                 goto err_obj;
256         }
257
258         vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
259         if (IS_ERR(vaddr)) {
260                 err = PTR_ERR(vaddr);
261                 goto err_obj;
262         }
263
264         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
265         if (err)
266                 goto err_map;
267
268         for_each_prime_number_from(count, 1, 16) {
269                 struct intel_engine_cs *engine;
270                 enum intel_engine_id id;
271
272                 for_each_engine(engine, i915, id) {
273                         if (!intel_engine_has_preemption(engine))
274                                 continue;
275
276                         memset(vaddr, 0, PAGE_SIZE);
277
278                         err = slice_semaphore_queue(engine, vma, count);
279                         if (err)
280                                 goto err_pin;
281
282                         if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
283                                 err = -EIO;
284                                 goto err_pin;
285                         }
286                 }
287         }
288
289 err_pin:
290         i915_vma_unpin(vma);
291 err_map:
292         i915_gem_object_unpin_map(obj);
293 err_obj:
294         i915_gem_object_put(obj);
295 err_unlock:
296         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
297         mutex_unlock(&i915->drm.struct_mutex);
298
299         return err;
300 }
301
302 static int live_busywait_preempt(void *arg)
303 {
304         struct drm_i915_private *i915 = arg;
305         struct i915_gem_context *ctx_hi, *ctx_lo;
306         struct intel_engine_cs *engine;
307         struct drm_i915_gem_object *obj;
308         struct i915_vma *vma;
309         enum intel_engine_id id;
310         intel_wakeref_t wakeref;
311         int err = -ENOMEM;
312         u32 *map;
313
314         /*
315          * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
316          * preempt the busywaits used to synchronise between rings.
317          */
318
319         mutex_lock(&i915->drm.struct_mutex);
320         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
321
322         ctx_hi = kernel_context(i915);
323         if (!ctx_hi)
324                 goto err_unlock;
325         ctx_hi->sched.priority =
326                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
327
328         ctx_lo = kernel_context(i915);
329         if (!ctx_lo)
330                 goto err_ctx_hi;
331         ctx_lo->sched.priority =
332                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
333
334         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
335         if (IS_ERR(obj)) {
336                 err = PTR_ERR(obj);
337                 goto err_ctx_lo;
338         }
339
340         map = i915_gem_object_pin_map(obj, I915_MAP_WC);
341         if (IS_ERR(map)) {
342                 err = PTR_ERR(map);
343                 goto err_obj;
344         }
345
346         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
347         if (IS_ERR(vma)) {
348                 err = PTR_ERR(vma);
349                 goto err_map;
350         }
351
352         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
353         if (err)
354                 goto err_map;
355
356         for_each_engine(engine, i915, id) {
357                 struct i915_request *lo, *hi;
358                 struct igt_live_test t;
359                 u32 *cs;
360
361                 if (!intel_engine_has_preemption(engine))
362                         continue;
363
364                 if (!intel_engine_can_store_dword(engine))
365                         continue;
366
367                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
368                         err = -EIO;
369                         goto err_vma;
370                 }
371
372                 /*
373                  * We create two requests. The low priority request
374                  * busywaits on a semaphore (inside the ringbuffer where
375                  * it should be preemptible) and the high priority request
376                  * uses an MI_STORE_DWORD_IMM to update the semaphore value
377                  * allowing the first request to complete. If preemption
378                  * fails, we hang instead.
379                  */
380
381                 lo = igt_request_alloc(ctx_lo, engine);
382                 if (IS_ERR(lo)) {
383                         err = PTR_ERR(lo);
384                         goto err_vma;
385                 }
386
387                 cs = intel_ring_begin(lo, 8);
388                 if (IS_ERR(cs)) {
389                         err = PTR_ERR(cs);
390                         i915_request_add(lo);
391                         goto err_vma;
392                 }
393
394                 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
395                 *cs++ = i915_ggtt_offset(vma);
396                 *cs++ = 0;
397                 *cs++ = 1;
398
399                 /* XXX Do we need a flush + invalidate here? */
400
401                 *cs++ = MI_SEMAPHORE_WAIT |
402                         MI_SEMAPHORE_GLOBAL_GTT |
403                         MI_SEMAPHORE_POLL |
404                         MI_SEMAPHORE_SAD_EQ_SDD;
405                 *cs++ = 0;
406                 *cs++ = i915_ggtt_offset(vma);
407                 *cs++ = 0;
408
409                 intel_ring_advance(lo, cs);
410                 i915_request_add(lo);
411
412                 if (wait_for(READ_ONCE(*map), 10)) {
413                         err = -ETIMEDOUT;
414                         goto err_vma;
415                 }
416
417                 /* Low priority request should be busywaiting now */
418                 if (i915_request_wait(lo, 0, 1) != -ETIME) {
419                         pr_err("%s: Busywaiting request did not busywait!\n",
420                                engine->name);
421                         err = -EIO;
422                         goto err_vma;
423                 }
424
425                 hi = igt_request_alloc(ctx_hi, engine);
426                 if (IS_ERR(hi)) {
427                         err = PTR_ERR(hi);
428                         goto err_vma;
429                 }
430
431                 cs = intel_ring_begin(hi, 4);
432                 if (IS_ERR(cs)) {
433                         err = PTR_ERR(cs);
434                         i915_request_add(hi);
435                         goto err_vma;
436                 }
437
438                 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
439                 *cs++ = i915_ggtt_offset(vma);
440                 *cs++ = 0;
441                 *cs++ = 0;
442
443                 intel_ring_advance(hi, cs);
444                 i915_request_add(hi);
445
446                 if (i915_request_wait(lo, 0, HZ / 5) < 0) {
447                         struct drm_printer p = drm_info_printer(i915->drm.dev);
448
449                         pr_err("%s: Failed to preempt semaphore busywait!\n",
450                                engine->name);
451
452                         intel_engine_dump(engine, &p, "%s\n", engine->name);
453                         GEM_TRACE_DUMP();
454
455                         intel_gt_set_wedged(&i915->gt);
456                         err = -EIO;
457                         goto err_vma;
458                 }
459                 GEM_BUG_ON(READ_ONCE(*map));
460
461                 if (igt_live_test_end(&t)) {
462                         err = -EIO;
463                         goto err_vma;
464                 }
465         }
466
467         err = 0;
468 err_vma:
469         i915_vma_unpin(vma);
470 err_map:
471         i915_gem_object_unpin_map(obj);
472 err_obj:
473         i915_gem_object_put(obj);
474 err_ctx_lo:
475         kernel_context_close(ctx_lo);
476 err_ctx_hi:
477         kernel_context_close(ctx_hi);
478 err_unlock:
479         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
480         mutex_unlock(&i915->drm.struct_mutex);
481         return err;
482 }
483
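/*
 * Look up the intel_context for @engine within @ctx, build a spinner
 * request on it and drop the context reference again.
 */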
484 static struct i915_request *
485 spinner_create_request(struct igt_spinner *spin,
486                        struct i915_gem_context *ctx,
487                        struct intel_engine_cs *engine,
488                        u32 arb)
489 {
490         struct intel_context *ce;
491         struct i915_request *rq;
492
493         ce = i915_gem_context_get_engine(ctx, engine->id);
494         if (IS_ERR(ce))
495                 return ERR_CAST(ce);
496
497         rq = igt_spinner_create_request(spin, ce, arb);
498         intel_context_put(ce);
499         return rq;
500 }
501
502 static int live_preempt(void *arg)
503 {
504         struct drm_i915_private *i915 = arg;
505         struct i915_gem_context *ctx_hi, *ctx_lo;
506         struct igt_spinner spin_hi, spin_lo;
507         struct intel_engine_cs *engine;
508         enum intel_engine_id id;
509         intel_wakeref_t wakeref;
510         int err = -ENOMEM;
511
512         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
513                 return 0;
514
515         if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
516                 pr_err("Logical preemption supported, but not exposed\n");
517
518         mutex_lock(&i915->drm.struct_mutex);
519         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
520
521         if (igt_spinner_init(&spin_hi, &i915->gt))
522                 goto err_unlock;
523
524         if (igt_spinner_init(&spin_lo, &i915->gt))
525                 goto err_spin_hi;
526
527         ctx_hi = kernel_context(i915);
528         if (!ctx_hi)
529                 goto err_spin_lo;
530         ctx_hi->sched.priority =
531                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
532
533         ctx_lo = kernel_context(i915);
534         if (!ctx_lo)
535                 goto err_ctx_hi;
536         ctx_lo->sched.priority =
537                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
538
539         for_each_engine(engine, i915, id) {
540                 struct igt_live_test t;
541                 struct i915_request *rq;
542
543                 if (!intel_engine_has_preemption(engine))
544                         continue;
545
546                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
547                         err = -EIO;
548                         goto err_ctx_lo;
549                 }
550
551                 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
552                                             MI_ARB_CHECK);
553                 if (IS_ERR(rq)) {
554                         err = PTR_ERR(rq);
555                         goto err_ctx_lo;
556                 }
557
558                 i915_request_add(rq);
559                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
560                         GEM_TRACE("lo spinner failed to start\n");
561                         GEM_TRACE_DUMP();
562                         intel_gt_set_wedged(&i915->gt);
563                         err = -EIO;
564                         goto err_ctx_lo;
565                 }
566
567                 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
568                                             MI_ARB_CHECK);
569                 if (IS_ERR(rq)) {
570                         igt_spinner_end(&spin_lo);
571                         err = PTR_ERR(rq);
572                         goto err_ctx_lo;
573                 }
574
575                 i915_request_add(rq);
576                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
577                         GEM_TRACE("hi spinner failed to start\n");
578                         GEM_TRACE_DUMP();
579                         intel_gt_set_wedged(&i915->gt);
580                         err = -EIO;
581                         goto err_ctx_lo;
582                 }
583
584                 igt_spinner_end(&spin_hi);
585                 igt_spinner_end(&spin_lo);
586
587                 if (igt_live_test_end(&t)) {
588                         err = -EIO;
589                         goto err_ctx_lo;
590                 }
591         }
592
593         err = 0;
594 err_ctx_lo:
595         kernel_context_close(ctx_lo);
596 err_ctx_hi:
597         kernel_context_close(ctx_hi);
598 err_spin_lo:
599         igt_spinner_fini(&spin_lo);
600 err_spin_hi:
601         igt_spinner_fini(&spin_hi);
602 err_unlock:
603         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
604         mutex_unlock(&i915->drm.struct_mutex);
605         return err;
606 }
607
608 static int live_late_preempt(void *arg)
609 {
610         struct drm_i915_private *i915 = arg;
611         struct i915_gem_context *ctx_hi, *ctx_lo;
612         struct igt_spinner spin_hi, spin_lo;
613         struct intel_engine_cs *engine;
614         struct i915_sched_attr attr = {};
615         enum intel_engine_id id;
616         intel_wakeref_t wakeref;
617         int err = -ENOMEM;
618
619         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
620                 return 0;
621
622         mutex_lock(&i915->drm.struct_mutex);
623         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
624
625         if (igt_spinner_init(&spin_hi, &i915->gt))
626                 goto err_unlock;
627
628         if (igt_spinner_init(&spin_lo, &i915->gt))
629                 goto err_spin_hi;
630
631         ctx_hi = kernel_context(i915);
632         if (!ctx_hi)
633                 goto err_spin_lo;
634
635         ctx_lo = kernel_context(i915);
636         if (!ctx_lo)
637                 goto err_ctx_hi;
638
639         /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
640         ctx_lo->sched.priority = I915_USER_PRIORITY(1);
641
642         for_each_engine(engine, i915, id) {
643                 struct igt_live_test t;
644                 struct i915_request *rq;
645
646                 if (!intel_engine_has_preemption(engine))
647                         continue;
648
649                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
650                         err = -EIO;
651                         goto err_ctx_lo;
652                 }
653
654                 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
655                                             MI_ARB_CHECK);
656                 if (IS_ERR(rq)) {
657                         err = PTR_ERR(rq);
658                         goto err_ctx_lo;
659                 }
660
661                 i915_request_add(rq);
662                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
663                         pr_err("First context failed to start\n");
664                         goto err_wedged;
665                 }
666
667                 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
668                                             MI_NOOP);
669                 if (IS_ERR(rq)) {
670                         igt_spinner_end(&spin_lo);
671                         err = PTR_ERR(rq);
672                         goto err_ctx_lo;
673                 }
674
675                 i915_request_add(rq);
676                 if (igt_wait_for_spinner(&spin_hi, rq)) {
677                         pr_err("Second context overtook first?\n");
678                         goto err_wedged;
679                 }
680
681                 attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
682                 engine->schedule(rq, &attr);
683
684                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
685                         pr_err("High priority context failed to preempt the low priority context\n");
686                         GEM_TRACE_DUMP();
687                         goto err_wedged;
688                 }
689
690                 igt_spinner_end(&spin_hi);
691                 igt_spinner_end(&spin_lo);
692
693                 if (igt_live_test_end(&t)) {
694                         err = -EIO;
695                         goto err_ctx_lo;
696                 }
697         }
698
699         err = 0;
700 err_ctx_lo:
701         kernel_context_close(ctx_lo);
702 err_ctx_hi:
703         kernel_context_close(ctx_hi);
704 err_spin_lo:
705         igt_spinner_fini(&spin_lo);
706 err_spin_hi:
707         igt_spinner_fini(&spin_hi);
708 err_unlock:
709         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
710         mutex_unlock(&i915->drm.struct_mutex);
711         return err;
712
713 err_wedged:
714         igt_spinner_end(&spin_hi);
715         igt_spinner_end(&spin_lo);
716         intel_gt_set_wedged(&i915->gt);
717         err = -EIO;
718         goto err_ctx_lo;
719 }
720
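/*
 * A preempt_client pairs a spinner with its own context, so that several
 * clients can be pitted against each other at different priorities.
 */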
721 struct preempt_client {
722         struct igt_spinner spin;
723         struct i915_gem_context *ctx;
724 };
725
726 static int preempt_client_init(struct drm_i915_private *i915,
727                                struct preempt_client *c)
728 {
729         c->ctx = kernel_context(i915);
730         if (!c->ctx)
731                 return -ENOMEM;
732
733         if (igt_spinner_init(&c->spin, &i915->gt))
734                 goto err_ctx;
735
736         return 0;
737
738 err_ctx:
739         kernel_context_close(c->ctx);
740         return -ENOMEM;
741 }
742
743 static void preempt_client_fini(struct preempt_client *c)
744 {
745         igt_spinner_fini(&c->spin);
746         kernel_context_close(c->ctx);
747 }
748
749 static int live_nopreempt(void *arg)
750 {
751         struct drm_i915_private *i915 = arg;
752         struct intel_engine_cs *engine;
753         struct preempt_client a, b;
754         enum intel_engine_id id;
755         intel_wakeref_t wakeref;
756         int err = -ENOMEM;
757
758         /*
759          * Verify that we can disable preemption for an individual request
760          * that may be under observation and so must not be interrupted.
761          */
762
763         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
764                 return 0;
765
766         mutex_lock(&i915->drm.struct_mutex);
767         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
768
769         if (preempt_client_init(i915, &a))
770                 goto err_unlock;
771         if (preempt_client_init(i915, &b))
772                 goto err_client_a;
773         b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
774
775         for_each_engine(engine, i915, id) {
776                 struct i915_request *rq_a, *rq_b;
777
778                 if (!intel_engine_has_preemption(engine))
779                         continue;
780
781                 engine->execlists.preempt_hang.count = 0;
782
783                 rq_a = spinner_create_request(&a.spin,
784                                               a.ctx, engine,
785                                               MI_ARB_CHECK);
786                 if (IS_ERR(rq_a)) {
787                         err = PTR_ERR(rq_a);
788                         goto err_client_b;
789                 }
790
791                 /* Low priority client, but unpreemptable! */
792                 rq_a->flags |= I915_REQUEST_NOPREEMPT;
793
794                 i915_request_add(rq_a);
795                 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
796                         pr_err("First client failed to start\n");
797                         goto err_wedged;
798                 }
799
800                 rq_b = spinner_create_request(&b.spin,
801                                               b.ctx, engine,
802                                               MI_ARB_CHECK);
803                 if (IS_ERR(rq_b)) {
804                         err = PTR_ERR(rq_b);
805                         goto err_client_b;
806                 }
807
808                 i915_request_add(rq_b);
809
810                 /* B is much more important than A! (But A is unpreemptable.) */
811                 GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
812
813                 /* Wait long enough for preemption and timeslicing */
814                 if (igt_wait_for_spinner(&b.spin, rq_b)) {
815                         pr_err("Second client started too early!\n");
816                         goto err_wedged;
817                 }
818
819                 igt_spinner_end(&a.spin);
820
821                 if (!igt_wait_for_spinner(&b.spin, rq_b)) {
822                         pr_err("Second client failed to start\n");
823                         goto err_wedged;
824                 }
825
826                 igt_spinner_end(&b.spin);
827
828                 if (engine->execlists.preempt_hang.count) {
829                         pr_err("Preemption recorded x%d; should have been suppressed!\n",
830                                engine->execlists.preempt_hang.count);
831                         err = -EINVAL;
832                         goto err_wedged;
833                 }
834
835                 if (igt_flush_test(i915, I915_WAIT_LOCKED))
836                         goto err_wedged;
837         }
838
839         err = 0;
840 err_client_b:
841         preempt_client_fini(&b);
842 err_client_a:
843         preempt_client_fini(&a);
844 err_unlock:
845         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
846         mutex_unlock(&i915->drm.struct_mutex);
847         return err;
848
849 err_wedged:
850         igt_spinner_end(&b.spin);
851         igt_spinner_end(&a.spin);
852         intel_gt_set_wedged(&i915->gt);
853         err = -EIO;
854         goto err_client_b;
855 }
856
857 static int live_suppress_self_preempt(void *arg)
858 {
859         struct drm_i915_private *i915 = arg;
860         struct intel_engine_cs *engine;
861         struct i915_sched_attr attr = {
862                 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
863         };
864         struct preempt_client a, b;
865         enum intel_engine_id id;
866         intel_wakeref_t wakeref;
867         int err = -ENOMEM;
868
869         /*
870          * Verify that if a preemption request does not cause a change in
871          * the current execution order, the preempt-to-idle injection is
872          * skipped and that we do not accidentally apply it after the CS
873          * completion event.
874          */
875
876         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
877                 return 0;
878
879         if (USES_GUC_SUBMISSION(i915))
880                 return 0; /* presume black box */
881
882         if (intel_vgpu_active(i915))
883                 return 0; /* GVT forces single port & request submission */
884
885         mutex_lock(&i915->drm.struct_mutex);
886         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
887
888         if (preempt_client_init(i915, &a))
889                 goto err_unlock;
890         if (preempt_client_init(i915, &b))
891                 goto err_client_a;
892
893         for_each_engine(engine, i915, id) {
894                 struct i915_request *rq_a, *rq_b;
895                 int depth;
896
897                 if (!intel_engine_has_preemption(engine))
898                         continue;
899
900                 engine->execlists.preempt_hang.count = 0;
901
902                 rq_a = spinner_create_request(&a.spin,
903                                               a.ctx, engine,
904                                               MI_NOOP);
905                 if (IS_ERR(rq_a)) {
906                         err = PTR_ERR(rq_a);
907                         goto err_client_b;
908                 }
909
910                 i915_request_add(rq_a);
911                 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
912                         pr_err("First client failed to start\n");
913                         goto err_wedged;
914                 }
915
916                 for (depth = 0; depth < 8; depth++) {
917                         rq_b = spinner_create_request(&b.spin,
918                                                       b.ctx, engine,
919                                                       MI_NOOP);
920                         if (IS_ERR(rq_b)) {
921                                 err = PTR_ERR(rq_b);
922                                 goto err_client_b;
923                         }
924                         i915_request_add(rq_b);
925
926                         GEM_BUG_ON(i915_request_completed(rq_a));
927                         engine->schedule(rq_a, &attr);
928                         igt_spinner_end(&a.spin);
929
930                         if (!igt_wait_for_spinner(&b.spin, rq_b)) {
931                                 pr_err("Second client failed to start\n");
932                                 goto err_wedged;
933                         }
934
935                         swap(a, b);
936                         rq_a = rq_b;
937                 }
938                 igt_spinner_end(&a.spin);
939
940                 if (engine->execlists.preempt_hang.count) {
941                         pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
942                                engine->execlists.preempt_hang.count,
943                                depth);
944                         err = -EINVAL;
945                         goto err_client_b;
946                 }
947
948                 if (igt_flush_test(i915, I915_WAIT_LOCKED))
949                         goto err_wedged;
950         }
951
952         err = 0;
953 err_client_b:
954         preempt_client_fini(&b);
955 err_client_a:
956         preempt_client_fini(&a);
957 err_unlock:
958         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
959         mutex_unlock(&i915->drm.struct_mutex);
960         return err;
961
962 err_wedged:
963         igt_spinner_end(&b.spin);
964         igt_spinner_end(&a.spin);
965         intel_gt_set_wedged(&i915->gt);
966         err = -EIO;
967         goto err_client_b;
968 }
969
970 static int __i915_sw_fence_call
971 dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
972 {
973         return NOTIFY_DONE;
974 }
975
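/*
 * Build a bare i915_request that is never submitted to hardware. Its
 * breadcrumb is made to point at the always-zero upper half of
 * fence.seqno, so the request reads as permanently incomplete; it is
 * used below to pre-populate a timeline and defeat the NEWCLIENT
 * priority boost.
 */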
976 static struct i915_request *dummy_request(struct intel_engine_cs *engine)
977 {
978         struct i915_request *rq;
979
980         rq = kzalloc(sizeof(*rq), GFP_KERNEL);
981         if (!rq)
982                 return NULL;
983
984         INIT_LIST_HEAD(&rq->active_list);
985         rq->engine = engine;
986
987         i915_sched_node_init(&rq->sched);
988
989         /* mark this request as permanently incomplete */
990         rq->fence.seqno = 1;
991         BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
992         rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
993         GEM_BUG_ON(i915_request_completed(rq));
994
995         i915_sw_fence_init(&rq->submit, dummy_notify);
996         set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
997
998         spin_lock_init(&rq->lock);
999         rq->fence.lock = &rq->lock;
1000         INIT_LIST_HEAD(&rq->fence.cb_list);
1001
1002         return rq;
1003 }
1004
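/* Tear the fake request down, signalling anything that waited upon it. */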
1005 static void dummy_request_free(struct i915_request *dummy)
1006 {
1007         /* We have to fake the CS interrupt to kick the next request */
1008         i915_sw_fence_commit(&dummy->submit);
1009
1010         i915_request_mark_complete(dummy);
1011         dma_fence_signal(&dummy->fence);
1012
1013         i915_sched_node_fini(&dummy->sched);
1014         i915_sw_fence_fini(&dummy->submit);
1015
1016         dma_fence_free(&dummy->fence);
1017 }
1018
1019 static int live_suppress_wait_preempt(void *arg)
1020 {
1021         struct drm_i915_private *i915 = arg;
1022         struct preempt_client client[4];
1023         struct intel_engine_cs *engine;
1024         enum intel_engine_id id;
1025         intel_wakeref_t wakeref;
1026         int err = -ENOMEM;
1027         int i;
1028
1029         /*
1030          * Waiters are given a little priority nudge, but not enough
1031          * to actually cause any preemption. Double check that we do
1032          * not needlessly generate preempt-to-idle cycles.
1033          */
1034
1035         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
1036                 return 0;
1037
1038         mutex_lock(&i915->drm.struct_mutex);
1039         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1040
1041         if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
1042                 goto err_unlock;
1043         if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
1044                 goto err_client_0;
1045         if (preempt_client_init(i915, &client[2])) /* head of queue */
1046                 goto err_client_1;
1047         if (preempt_client_init(i915, &client[3])) /* bystander */
1048                 goto err_client_2;
1049
1050         for_each_engine(engine, i915, id) {
1051                 int depth;
1052
1053                 if (!intel_engine_has_preemption(engine))
1054                         continue;
1055
1056                 if (!engine->emit_init_breadcrumb)
1057                         continue;
1058
1059                 for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
1060                         struct i915_request *rq[ARRAY_SIZE(client)];
1061                         struct i915_request *dummy;
1062
1063                         engine->execlists.preempt_hang.count = 0;
1064
1065                         dummy = dummy_request(engine);
1066                         if (!dummy)
1067                                 goto err_client_3;
1068
1069                         for (i = 0; i < ARRAY_SIZE(client); i++) {
1070                                 rq[i] = spinner_create_request(&client[i].spin,
1071                                                                client[i].ctx, engine,
1072                                                                MI_NOOP);
1073                                 if (IS_ERR(rq[i])) {
1074                                         err = PTR_ERR(rq[i]);
1075                                         goto err_wedged;
1076                                 }
1077
1078                                 /* Disable NEWCLIENT promotion */
1079                                 __i915_active_request_set(&rq[i]->timeline->last_request,
1080                                                           dummy);
1081                                 i915_request_add(rq[i]);
1082                         }
1083
1084                         dummy_request_free(dummy);
1085
1086                         GEM_BUG_ON(i915_request_completed(rq[0]));
1087                         if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
1088                                 pr_err("%s: First client failed to start\n",
1089                                        engine->name);
1090                                 goto err_wedged;
1091                         }
1092                         GEM_BUG_ON(!i915_request_started(rq[0]));
1093
1094                         if (i915_request_wait(rq[depth],
1095                                               I915_WAIT_PRIORITY,
1096                                               1) != -ETIME) {
1097                                 pr_err("%s: Waiter depth:%d completed!\n",
1098                                        engine->name, depth);
1099                                 goto err_wedged;
1100                         }
1101
1102                         for (i = 0; i < ARRAY_SIZE(client); i++)
1103                                 igt_spinner_end(&client[i].spin);
1104
1105                         if (igt_flush_test(i915, I915_WAIT_LOCKED))
1106                                 goto err_wedged;
1107
1108                         if (engine->execlists.preempt_hang.count) {
1109                                 pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
1110                                        engine->name,
1111                                        engine->execlists.preempt_hang.count,
1112                                        depth);
1113                                 err = -EINVAL;
1114                                 goto err_client_3;
1115                         }
1116                 }
1117         }
1118
1119         err = 0;
1120 err_client_3:
1121         preempt_client_fini(&client[3]);
1122 err_client_2:
1123         preempt_client_fini(&client[2]);
1124 err_client_1:
1125         preempt_client_fini(&client[1]);
1126 err_client_0:
1127         preempt_client_fini(&client[0]);
1128 err_unlock:
1129         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1130         mutex_unlock(&i915->drm.struct_mutex);
1131         return err;
1132
1133 err_wedged:
1134         for (i = 0; i < ARRAY_SIZE(client); i++)
1135                 igt_spinner_end(&client[i].spin);
1136         intel_gt_set_wedged(&i915->gt);
1137         err = -EIO;
1138         goto err_client_3;
1139 }
1140
1141 static int live_chain_preempt(void *arg)
1142 {
1143         struct drm_i915_private *i915 = arg;
1144         struct intel_engine_cs *engine;
1145         struct preempt_client hi, lo;
1146         enum intel_engine_id id;
1147         intel_wakeref_t wakeref;
1148         int err = -ENOMEM;
1149
1150         /*
1151          * Build a chain AB...BA between two contexts (A, B) and request
1152          * preemption of the last request. It should then complete before
1153          * the previously submitted spinner in B.
1154          */
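        /*
         * The length of the chain is bounded by how many requests fit in
         * the low priority ring; that limit is measured below from the
         * footprint of the first spinner request.
         */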
1155
1156         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
1157                 return 0;
1158
1159         mutex_lock(&i915->drm.struct_mutex);
1160         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1161
1162         if (preempt_client_init(i915, &hi))
1163                 goto err_unlock;
1164
1165         if (preempt_client_init(i915, &lo))
1166                 goto err_client_hi;
1167
1168         for_each_engine(engine, i915, id) {
1169                 struct i915_sched_attr attr = {
1170                         .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
1171                 };
1172                 struct igt_live_test t;
1173                 struct i915_request *rq;
1174                 int ring_size, count, i;
1175
1176                 if (!intel_engine_has_preemption(engine))
1177                         continue;
1178
1179                 rq = spinner_create_request(&lo.spin,
1180                                             lo.ctx, engine,
1181                                             MI_ARB_CHECK);
1182                 if (IS_ERR(rq))
1183                         goto err_wedged;
1184                 i915_request_add(rq);
1185
1186                 ring_size = rq->wa_tail - rq->head;
1187                 if (ring_size < 0)
1188                         ring_size += rq->ring->size;
1189                 ring_size = rq->ring->size / ring_size;
1190                 pr_debug("%s(%s): Using maximum of %d requests\n",
1191                          __func__, engine->name, ring_size);
1192
1193                 igt_spinner_end(&lo.spin);
1194                 if (i915_request_wait(rq, 0, HZ / 2) < 0) {
1195                         pr_err("Timed out waiting to flush %s\n", engine->name);
1196                         goto err_wedged;
1197                 }
1198
1199                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
1200                         err = -EIO;
1201                         goto err_wedged;
1202                 }
1203
1204                 for_each_prime_number_from(count, 1, ring_size) {
1205                         rq = spinner_create_request(&hi.spin,
1206                                                     hi.ctx, engine,
1207                                                     MI_ARB_CHECK);
1208                         if (IS_ERR(rq))
1209                                 goto err_wedged;
1210                         i915_request_add(rq);
1211                         if (!igt_wait_for_spinner(&hi.spin, rq))
1212                                 goto err_wedged;
1213
1214                         rq = spinner_create_request(&lo.spin,
1215                                                     lo.ctx, engine,
1216                                                     MI_ARB_CHECK);
1217                         if (IS_ERR(rq))
1218                                 goto err_wedged;
1219                         i915_request_add(rq);
1220
1221                         for (i = 0; i < count; i++) {
1222                                 rq = igt_request_alloc(lo.ctx, engine);
1223                                 if (IS_ERR(rq))
1224                                         goto err_wedged;
1225                                 i915_request_add(rq);
1226                         }
1227
1228                         rq = igt_request_alloc(hi.ctx, engine);
1229                         if (IS_ERR(rq))
1230                                 goto err_wedged;
1231                         i915_request_add(rq);
1232                         engine->schedule(rq, &attr);
1233
1234                         igt_spinner_end(&hi.spin);
1235                         if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1236                                 struct drm_printer p =
1237                                         drm_info_printer(i915->drm.dev);
1238
1239                                 pr_err("Failed to preempt over chain of %d\n",
1240                                        count);
1241                                 intel_engine_dump(engine, &p,
1242                                                   "%s\n", engine->name);
1243                                 goto err_wedged;
1244                         }
1245                         igt_spinner_end(&lo.spin);
1246
1247                         rq = igt_request_alloc(lo.ctx, engine);
1248                         if (IS_ERR(rq))
1249                                 goto err_wedged;
1250                         i915_request_add(rq);
1251                         if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1252                                 struct drm_printer p =
1253                                         drm_info_printer(i915->drm.dev);
1254
1255                                 pr_err("Failed to flush low priority chain of %d requests\n",
1256                                        count);
1257                                 intel_engine_dump(engine, &p,
1258                                                   "%s\n", engine->name);
1259                                 goto err_wedged;
1260                         }
1261                 }
1262
1263                 if (igt_live_test_end(&t)) {
1264                         err = -EIO;
1265                         goto err_wedged;
1266                 }
1267         }
1268
1269         err = 0;
1270 err_client_lo:
1271         preempt_client_fini(&lo);
1272 err_client_hi:
1273         preempt_client_fini(&hi);
1274 err_unlock:
1275         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1276         mutex_unlock(&i915->drm.struct_mutex);
1277         return err;
1278
1279 err_wedged:
1280         igt_spinner_end(&hi.spin);
1281         igt_spinner_end(&lo.spin);
1282         intel_gt_set_wedged(&i915->gt);
1283         err = -EIO;
1284         goto err_client_lo;
1285 }
1286
1287 static int live_preempt_hang(void *arg)
1288 {
1289         struct drm_i915_private *i915 = arg;
1290         struct i915_gem_context *ctx_hi, *ctx_lo;
1291         struct igt_spinner spin_hi, spin_lo;
1292         struct intel_engine_cs *engine;
1293         enum intel_engine_id id;
1294         intel_wakeref_t wakeref;
1295         int err = -ENOMEM;
1296
1297         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
1298                 return 0;
1299
1300         if (!intel_has_reset_engine(i915))
1301                 return 0;
1302
1303         mutex_lock(&i915->drm.struct_mutex);
1304         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1305
1306         if (igt_spinner_init(&spin_hi, &i915->gt))
1307                 goto err_unlock;
1308
1309         if (igt_spinner_init(&spin_lo, &i915->gt))
1310                 goto err_spin_hi;
1311
1312         ctx_hi = kernel_context(i915);
1313         if (!ctx_hi)
1314                 goto err_spin_lo;
1315         ctx_hi->sched.priority =
1316                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
1317
1318         ctx_lo = kernel_context(i915);
1319         if (!ctx_lo)
1320                 goto err_ctx_hi;
1321         ctx_lo->sched.priority =
1322                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
1323
1324         for_each_engine(engine, i915, id) {
1325                 struct i915_request *rq;
1326
1327                 if (!intel_engine_has_preemption(engine))
1328                         continue;
1329
1330                 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
1331                                             MI_ARB_CHECK);
1332                 if (IS_ERR(rq)) {
1333                         err = PTR_ERR(rq);
1334                         goto err_ctx_lo;
1335                 }
1336
1337                 i915_request_add(rq);
1338                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
1339                         GEM_TRACE("lo spinner failed to start\n");
1340                         GEM_TRACE_DUMP();
1341                         intel_gt_set_wedged(&i915->gt);
1342                         err = -EIO;
1343                         goto err_ctx_lo;
1344                 }
1345
1346                 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
1347                                             MI_ARB_CHECK);
1348                 if (IS_ERR(rq)) {
1349                         igt_spinner_end(&spin_lo);
1350                         err = PTR_ERR(rq);
1351                         goto err_ctx_lo;
1352                 }
1353
1354                 init_completion(&engine->execlists.preempt_hang.completion);
1355                 engine->execlists.preempt_hang.inject_hang = true;
1356
1357                 i915_request_add(rq);
1358
1359                 if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
1360                                                  HZ / 10)) {
1361                         pr_err("Preemption did not occur within timeout!\n");
1362                         GEM_TRACE_DUMP();
1363                         intel_gt_set_wedged(&i915->gt);
1364                         err = -EIO;
1365                         goto err_ctx_lo;
1366                 }
1367
1368                 set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
1369                 intel_engine_reset(engine, NULL);
1370                 clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
1371
1372                 engine->execlists.preempt_hang.inject_hang = false;
1373
1374                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
1375                         GEM_TRACE("hi spinner failed to start\n");
1376                         GEM_TRACE_DUMP();
1377                         intel_gt_set_wedged(&i915->gt);
1378                         err = -EIO;
1379                         goto err_ctx_lo;
1380                 }
1381
1382                 igt_spinner_end(&spin_hi);
1383                 igt_spinner_end(&spin_lo);
1384                 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
1385                         err = -EIO;
1386                         goto err_ctx_lo;
1387                 }
1388         }
1389
1390         err = 0;
1391 err_ctx_lo:
1392         kernel_context_close(ctx_lo);
1393 err_ctx_hi:
1394         kernel_context_close(ctx_hi);
1395 err_spin_lo:
1396         igt_spinner_fini(&spin_lo);
1397 err_spin_hi:
1398         igt_spinner_fini(&spin_hi);
1399 err_unlock:
1400         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1401         mutex_unlock(&i915->drm.struct_mutex);
1402         return err;
1403 }
1404
1405 static int random_range(struct rnd_state *rnd, int min, int max)
1406 {
1407         return i915_prandom_u32_max_state(max - min, rnd) + min;
1408 }
1409
1410 static int random_priority(struct rnd_state *rnd)
1411 {
1412         return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
1413 }
1414
1415 struct preempt_smoke {
1416         struct drm_i915_private *i915;
1417         struct i915_gem_context **contexts;
1418         struct intel_engine_cs *engine;
1419         struct drm_i915_gem_object *batch;
1420         unsigned int ncontext;
1421         struct rnd_state prng;
1422         unsigned long count;
1423 };
1424
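/* Pick one of the pool of contexts at random for the next submission. */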
1425 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
1426 {
1427         return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
1428                                                           &smoke->prng)];
1429 }
1430
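/*
 * Submit a single request from @ctx at priority @prio, optionally
 * running @batch (a page of MI_ARB_CHECKs) so that it can also be
 * preempted mid-batch rather than only on request boundaries.
 */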
1431 static int smoke_submit(struct preempt_smoke *smoke,
1432                         struct i915_gem_context *ctx, int prio,
1433                         struct drm_i915_gem_object *batch)
1434 {
1435         struct i915_request *rq;
1436         struct i915_vma *vma = NULL;
1437         int err = 0;
1438
1439         if (batch) {
1440                 vma = i915_vma_instance(batch, ctx->vm, NULL);
1441                 if (IS_ERR(vma))
1442                         return PTR_ERR(vma);
1443
1444                 err = i915_vma_pin(vma, 0, 0, PIN_USER);
1445                 if (err)
1446                         return err;
1447         }
1448
1449         ctx->sched.priority = prio;
1450
1451         rq = igt_request_alloc(ctx, smoke->engine);
1452         if (IS_ERR(rq)) {
1453                 err = PTR_ERR(rq);
1454                 goto unpin;
1455         }
1456
1457         if (vma) {
1458                 i915_vma_lock(vma);
1459                 err = rq->engine->emit_bb_start(rq,
1460                                                 vma->node.start,
1461                                                 PAGE_SIZE, 0);
1462                 if (!err)
1463                         err = i915_vma_move_to_active(vma, rq, 0);
1464                 i915_vma_unlock(vma);
1465         }
1466
1467         i915_request_add(rq);
1468
1469 unpin:
1470         if (vma)
1471                 i915_vma_unpin(vma);
1472
1473         return err;
1474 }
1475
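/*
 * Per-engine worker for smoke_crescendo(): keep submitting requests at
 * cyclically increasing priority until the timeout expires, recording
 * how many were queued.
 */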
1476 static int smoke_crescendo_thread(void *arg)
1477 {
1478         struct preempt_smoke *smoke = arg;
1479         IGT_TIMEOUT(end_time);
1480         unsigned long count;
1481
1482         count = 0;
1483         do {
1484                 struct i915_gem_context *ctx = smoke_context(smoke);
1485                 int err;
1486
1487                 mutex_lock(&smoke->i915->drm.struct_mutex);
1488                 err = smoke_submit(smoke,
1489                                    ctx, count % I915_PRIORITY_MAX,
1490                                    smoke->batch);
1491                 mutex_unlock(&smoke->i915->drm.struct_mutex);
1492                 if (err)
1493                         return err;
1494
1495                 count++;
1496         } while (!__igt_timeout(end_time, NULL));
1497
1498         smoke->count = count;
1499         return 0;
1500 }
1501
1502 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
1503 #define BATCH BIT(0)
1504 {
1505         struct task_struct *tsk[I915_NUM_ENGINES] = {};
1506         struct preempt_smoke arg[I915_NUM_ENGINES];
1507         struct intel_engine_cs *engine;
1508         enum intel_engine_id id;
1509         unsigned long count;
1510         int err = 0;
1511
1512         mutex_unlock(&smoke->i915->drm.struct_mutex);
1513
1514         for_each_engine(engine, smoke->i915, id) {
1515                 arg[id] = *smoke;
1516                 arg[id].engine = engine;
1517                 if (!(flags & BATCH))
1518                         arg[id].batch = NULL;
1519                 arg[id].count = 0;
1520
1521                 tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
1522                                       "igt/smoke:%d", id);
1523                 if (IS_ERR(tsk[id])) {
1524                         err = PTR_ERR(tsk[id]);
1525                         break;
1526                 }
1527                 get_task_struct(tsk[id]);
1528         }
1529
1530         count = 0;
1531         for_each_engine(engine, smoke->i915, id) {
1532                 int status;
1533
1534                 if (IS_ERR_OR_NULL(tsk[id]))
1535                         continue;
1536
1537                 status = kthread_stop(tsk[id]);
1538                 if (status && !err)
1539                         err = status;
1540
1541                 count += arg[id].count;
1542
1543                 put_task_struct(tsk[id]);
1544         }
1545
1546         mutex_lock(&smoke->i915->drm.struct_mutex);
1547
1548         pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
1549                 count, flags,
1550                 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1551         return err;
1552 }
1553
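/*
 * Single threaded variant: walk every engine in turn, submitting
 * requests at random priorities until the timeout expires.
 */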
1554 static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
1555 {
1556         enum intel_engine_id id;
1557         IGT_TIMEOUT(end_time);
1558         unsigned long count;
1559
1560         count = 0;
1561         do {
1562                 for_each_engine(smoke->engine, smoke->i915, id) {
1563                         struct i915_gem_context *ctx = smoke_context(smoke);
1564                         int err;
1565
1566                         err = smoke_submit(smoke,
1567                                            ctx, random_priority(&smoke->prng),
1568                                            flags & BATCH ? smoke->batch : NULL);
1569                         if (err)
1570                                 return err;
1571
1572                         count++;
1573                 }
1574         } while (!__igt_timeout(end_time, NULL));
1575
1576         pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
1577                 count, flags,
1578                 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1579         return 0;
1580 }
1581
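/*
 * Preemption smoketest: build a batch full of arbitration points, create a
 * large pool of contexts and hammer all engines with requests of varying
 * priority, first from per-engine threads (crescendo) and then from a single
 * thread cycling over the engines (random), with and without the batch.
 */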
1582 static int live_preempt_smoke(void *arg)
1583 {
1584         struct preempt_smoke smoke = {
1585                 .i915 = arg,
1586                 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
1587                 .ncontext = 1024,
1588         };
1589         const unsigned int phase[] = { 0, BATCH };
1590         intel_wakeref_t wakeref;
1591         struct igt_live_test t;
1592         int err = -ENOMEM;
1593         u32 *cs;
1594         int n;
1595
1596         if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
1597                 return 0;
1598
1599         smoke.contexts = kmalloc_array(smoke.ncontext,
1600                                        sizeof(*smoke.contexts),
1601                                        GFP_KERNEL);
1602         if (!smoke.contexts)
1603                 return -ENOMEM;
1604
1605         mutex_lock(&smoke.i915->drm.struct_mutex);
1606         wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
1607
1608         smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
1609         if (IS_ERR(smoke.batch)) {
1610                 err = PTR_ERR(smoke.batch);
1611                 goto err_unlock;
1612         }
1613
1614         cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
1615         if (IS_ERR(cs)) {
1616                 err = PTR_ERR(cs);
1617                 goto err_batch;
1618         }
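        /*
         * Fill the batch with arbitration points (MI_ARB_CHECK) so it can be
         * preempted between every instruction, then terminate it with
         * MI_BATCH_BUFFER_END.
         */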
1619         for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
1620                 cs[n] = MI_ARB_CHECK;
1621         cs[n] = MI_BATCH_BUFFER_END;
1622         i915_gem_object_flush_map(smoke.batch);
1623         i915_gem_object_unpin_map(smoke.batch);
1624
1625         if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
1626                 err = -EIO;
1627                 goto err_batch;
1628         }
1629
1630         for (n = 0; n < smoke.ncontext; n++) {
1631                 smoke.contexts[n] = kernel_context(smoke.i915);
1632                 if (!smoke.contexts[n])
1633                         goto err_ctx;
1634         }
1635
1636         for (n = 0; n < ARRAY_SIZE(phase); n++) {
1637                 err = smoke_crescendo(&smoke, phase[n]);
1638                 if (err)
1639                         goto err_ctx;
1640
1641                 err = smoke_random(&smoke, phase[n]);
1642                 if (err)
1643                         goto err_ctx;
1644         }
1645
1646 err_ctx:
1647         if (igt_live_test_end(&t))
1648                 err = -EIO;
1649
1650         for (n = 0; n < smoke.ncontext; n++) {
1651                 if (!smoke.contexts[n])
1652                         break;
1653                 kernel_context_close(smoke.contexts[n]);
1654         }
1655
1656 err_batch:
1657         i915_gem_object_put(smoke.batch);
1658 err_unlock:
1659         intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
1660         mutex_unlock(&smoke.i915->drm.struct_mutex);
1661         kfree(smoke.contexts);
1662
1663         return err;
1664 }
1665
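/*
 * Submit batches of empty requests to a virtual engine built from the given
 * siblings, scaling the number of requests per context through a series of
 * primes, and report the round-trip latency for the first and last batch.
 */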
1666 static int nop_virtual_engine(struct drm_i915_private *i915,
1667                               struct intel_engine_cs **siblings,
1668                               unsigned int nsibling,
1669                               unsigned int nctx,
1670                               unsigned int flags)
1671 #define CHAIN BIT(0)
1672 {
1673         IGT_TIMEOUT(end_time);
1674         struct i915_request *request[16];
1675         struct i915_gem_context *ctx[16];
1676         struct intel_context *ve[16];
1677         unsigned long n, prime, nc;
1678         struct igt_live_test t;
1679         ktime_t times[2] = {};
1680         int err;
1681
1682         GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
1683
1684         for (n = 0; n < nctx; n++) {
1685                 ctx[n] = kernel_context(i915);
1686                 if (!ctx[n]) {
1687                         err = -ENOMEM;
1688                         nctx = n;
1689                         goto out;
1690                 }
1691
1692                 ve[n] = intel_execlists_create_virtual(ctx[n],
1693                                                        siblings, nsibling);
1694                 if (IS_ERR(ve[n])) {
1695                         kernel_context_close(ctx[n]);
1696                         err = PTR_ERR(ve[n]);
1697                         nctx = n;
1698                         goto out;
1699                 }
1700
1701                 err = intel_context_pin(ve[n]);
1702                 if (err) {
1703                         intel_context_put(ve[n]);
1704                         kernel_context_close(ctx[n]);
1705                         nctx = n;
1706                         goto out;
1707                 }
1708         }
1709
1710         err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
1711         if (err)
1712                 goto out;
1713
1714         for_each_prime_number_from(prime, 1, 8192) {
1715                 times[1] = ktime_get_raw();
1716
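                /*
                 * Either queue all requests for one context before moving on
                 * to the next (CHAIN), or interleave the contexts round-robin.
                 */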
1717                 if (flags & CHAIN) {
1718                         for (nc = 0; nc < nctx; nc++) {
1719                                 for (n = 0; n < prime; n++) {
1720                                         request[nc] =
1721                                                 i915_request_create(ve[nc]);
1722                                         if (IS_ERR(request[nc])) {
1723                                                 err = PTR_ERR(request[nc]);
1724                                                 goto out;
1725                                         }
1726
1727                                         i915_request_add(request[nc]);
1728                                 }
1729                         }
1730                 } else {
1731                         for (n = 0; n < prime; n++) {
1732                                 for (nc = 0; nc < nctx; nc++) {
1733                                         request[nc] =
1734                                                 i915_request_create(ve[nc]);
1735                                         if (IS_ERR(request[nc])) {
1736                                                 err = PTR_ERR(request[nc]);
1737                                                 goto out;
1738                                         }
1739
1740                                         i915_request_add(request[nc]);
1741                                 }
1742                         }
1743                 }
1744
1745                 for (nc = 0; nc < nctx; nc++) {
1746                         if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
1747                                 pr_err("%s(%s): wait for %llx:%lld timed out\n",
1748                                        __func__, ve[0]->engine->name,
1749                                        request[nc]->fence.context,
1750                                        request[nc]->fence.seqno);
1751
1752                                 GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1753                                           __func__, ve[0]->engine->name,
1754                                           request[nc]->fence.context,
1755                                           request[nc]->fence.seqno);
1756                                 GEM_TRACE_DUMP();
1757                                 intel_gt_set_wedged(&i915->gt);
1758                                 break;
1759                         }
1760                 }
1761
1762                 times[1] = ktime_sub(ktime_get_raw(), times[1]);
1763                 if (prime == 1)
1764                         times[0] = times[1];
1765
1766                 if (__igt_timeout(end_time, NULL))
1767                         break;
1768         }
1769
1770         err = igt_live_test_end(&t);
1771         if (err)
1772                 goto out;
1773
1774         pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
1775                 nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
1776                 prime, div64_u64(ktime_to_ns(times[1]), prime));
1777
1778 out:
1779         if (igt_flush_test(i915, I915_WAIT_LOCKED))
1780                 err = -EIO;
1781
1782         for (nc = 0; nc < nctx; nc++) {
1783                 intel_context_unpin(ve[nc]);
1784                 intel_context_put(ve[nc]);
1785                 kernel_context_close(ctx[nc]);
1786         }
1787         return err;
1788 }
1789
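/*
 * First wrap each physical engine in a single-sibling virtual engine, then,
 * for every engine class with at least two instances, exercise a virtual
 * engine with an increasing number of contexts, finishing with a chained
 * submission pass.
 */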
1790 static int live_virtual_engine(void *arg)
1791 {
1792         struct drm_i915_private *i915 = arg;
1793         struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1794         struct intel_engine_cs *engine;
1795         struct intel_gt *gt = &i915->gt;
1796         enum intel_engine_id id;
1797         unsigned int class, inst;
1798         int err = -ENODEV;
1799
1800         if (USES_GUC_SUBMISSION(i915))
1801                 return 0;
1802
1803         mutex_lock(&i915->drm.struct_mutex);
1804
1805         for_each_engine(engine, i915, id) {
1806                 err = nop_virtual_engine(i915, &engine, 1, 1, 0);
1807                 if (err) {
1808                         pr_err("Failed to wrap engine %s: err=%d\n",
1809                                engine->name, err);
1810                         goto out_unlock;
1811                 }
1812         }
1813
1814         for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1815                 int nsibling, n;
1816
1817                 nsibling = 0;
1818                 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1819                         if (!gt->engine_class[class][inst])
1820                                 continue;
1821
1822                         siblings[nsibling++] = gt->engine_class[class][inst];
1823                 }
1824                 if (nsibling < 2)
1825                         continue;
1826
1827                 for (n = 1; n <= nsibling + 1; n++) {
1828                         err = nop_virtual_engine(i915, siblings, nsibling,
1829                                                  n, 0);
1830                         if (err)
1831                                 goto out_unlock;
1832                 }
1833
1834                 err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
1835                 if (err)
1836                         goto out_unlock;
1837         }
1838
1839 out_unlock:
1840         mutex_unlock(&i915->drm.struct_mutex);
1841         return err;
1842 }
1843
1844 static int mask_virtual_engine(struct drm_i915_private *i915,
1845                                struct intel_engine_cs **siblings,
1846                                unsigned int nsibling)
1847 {
1848         struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
1849         struct i915_gem_context *ctx;
1850         struct intel_context *ve;
1851         struct igt_live_test t;
1852         unsigned int n;
1853         int err;
1854
1855         /*
1856          * Check that by setting the execution mask on a request, we can
1857          * restrict it to our desired engine within the virtual engine.
1858          */
1859
1860         ctx = kernel_context(i915);
1861         if (!ctx)
1862                 return -ENOMEM;
1863
1864         ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
1865         if (IS_ERR(ve)) {
1866                 err = PTR_ERR(ve);
1867                 goto out_close;
1868         }
1869
1870         err = intel_context_pin(ve);
1871         if (err)
1872                 goto out_put;
1873
1874         err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
1875         if (err)
1876                 goto out_unpin;
1877
1878         for (n = 0; n < nsibling; n++) {
1879                 request[n] = i915_request_create(ve);
1880                 if (IS_ERR(request[n])) {
1881                         err = PTR_ERR(request[n]);
1882                         nsibling = n;
1883                         goto out;
1884                 }
1885
1886                 /* Reverse order as it's more likely to be unnatural */
1887                 request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
1888
1889                 i915_request_get(request[n]);
1890                 i915_request_add(request[n]);
1891         }
1892
1893         for (n = 0; n < nsibling; n++) {
1894                 if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
1895                         pr_err("%s(%s): wait for %llx:%lld timed out\n",
1896                                __func__, ve->engine->name,
1897                                request[n]->fence.context,
1898                                request[n]->fence.seqno);
1899
1900                         GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1901                                   __func__, ve->engine->name,
1902                                   request[n]->fence.context,
1903                                   request[n]->fence.seqno);
1904                         GEM_TRACE_DUMP();
1905                         intel_gt_set_wedged(&i915->gt);
1906                         err = -EIO;
1907                         goto out;
1908                 }
1909
1910                 if (request[n]->engine != siblings[nsibling - n - 1]) {
1911                         pr_err("Executed on wrong sibling '%s', expected '%s'\n",
1912                                request[n]->engine->name,
1913                                siblings[nsibling - n - 1]->name);
1914                         err = -EINVAL;
1915                         goto out;
1916                 }
1917         }
1918
1919         err = igt_live_test_end(&t);
1922
1923 out:
1924         if (igt_flush_test(i915, I915_WAIT_LOCKED))
1925                 err = -EIO;
1926
1927         for (n = 0; n < nsibling; n++)
1928                 i915_request_put(request[n]);
1929
1930 out_unpin:
1931         intel_context_unpin(ve);
1932 out_put:
1933         intel_context_put(ve);
1934 out_close:
1935         kernel_context_close(ctx);
1936         return err;
1937 }
1938
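/*
 * For each engine class with multiple instances, build a virtual engine from
 * the siblings and verify that the execution mask steers each request onto
 * its intended sibling.
 */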
1939 static int live_virtual_mask(void *arg)
1940 {
1941         struct drm_i915_private *i915 = arg;
1942         struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1943         struct intel_gt *gt = &i915->gt;
1944         unsigned int class, inst;
1945         int err = 0;
1946
1947         if (USES_GUC_SUBMISSION(i915))
1948                 return 0;
1949
1950         mutex_lock(&i915->drm.struct_mutex);
1951
1952         for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1953                 unsigned int nsibling;
1954
1955                 nsibling = 0;
1956                 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1957                         if (!gt->engine_class[class][inst])
1958                                 break;
1959
1960                         siblings[nsibling++] = gt->engine_class[class][inst];
1961                 }
1962                 if (nsibling < 2)
1963                         continue;
1964
1965                 err = mask_virtual_engine(i915, siblings, nsibling);
1966                 if (err)
1967                         goto out_unlock;
1968         }
1969
1970 out_unlock:
1971         mutex_unlock(&i915->drm.struct_mutex);
1972         return err;
1973 }
1974
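/*
 * For every possible master engine outside the target class, submit a master
 * request plus one bonded request per sibling on a virtual engine, then check
 * that each bonded request executed on the sibling it was bonded to.
 */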
1975 static int bond_virtual_engine(struct drm_i915_private *i915,
1976                                unsigned int class,
1977                                struct intel_engine_cs **siblings,
1978                                unsigned int nsibling,
1979                                unsigned int flags)
1980 #define BOND_SCHEDULE BIT(0)
1981 {
1982         struct intel_engine_cs *master;
1983         struct i915_gem_context *ctx;
1984         struct i915_request *rq[16];
1985         enum intel_engine_id id;
1986         unsigned long n;
1987         int err;
1988
1989         GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
1990
1991         ctx = kernel_context(i915);
1992         if (!ctx)
1993                 return -ENOMEM;
1994
1995         err = 0;
1996         rq[0] = ERR_PTR(-ENOMEM);
1997         for_each_engine(master, i915, id) {
1998                 struct i915_sw_fence fence = {};
1999
2000                 if (master->class == class)
2001                         continue;
2002
2003                 memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
2004
2005                 rq[0] = igt_request_alloc(ctx, master);
2006                 if (IS_ERR(rq[0])) {
2007                         err = PTR_ERR(rq[0]);
2008                         goto out;
2009                 }
2010                 i915_request_get(rq[0]);
2011
2012                 if (flags & BOND_SCHEDULE) {
2013                         onstack_fence_init(&fence);
2014                         err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
2015                                                                &fence,
2016                                                                GFP_KERNEL);
2017                 }
2018                 i915_request_add(rq[0]);
2019                 if (err < 0)
2020                         goto out;
2021
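                /*
                 * Create a virtual engine per sibling, bond it to the master
                 * and couple its request to the master's execution fence so
                 * that the bond selects the designated sibling.
                 */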
2022                 for (n = 0; n < nsibling; n++) {
2023                         struct intel_context *ve;
2024
2025                         ve = intel_execlists_create_virtual(ctx,
2026                                                             siblings,
2027                                                             nsibling);
2028                         if (IS_ERR(ve)) {
2029                                 err = PTR_ERR(ve);
2030                                 onstack_fence_fini(&fence);
2031                                 goto out;
2032                         }
2033
2034                         err = intel_virtual_engine_attach_bond(ve->engine,
2035                                                                master,
2036                                                                siblings[n]);
2037                         if (err) {
2038                                 intel_context_put(ve);
2039                                 onstack_fence_fini(&fence);
2040                                 goto out;
2041                         }
2042
2043                         err = intel_context_pin(ve);
2044                         intel_context_put(ve);
2045                         if (err) {
2046                                 onstack_fence_fini(&fence);
2047                                 goto out;
2048                         }
2049
2050                         rq[n + 1] = i915_request_create(ve);
2051                         intel_context_unpin(ve);
2052                         if (IS_ERR(rq[n + 1])) {
2053                                 err = PTR_ERR(rq[n + 1]);
2054                                 onstack_fence_fini(&fence);
2055                                 goto out;
2056                         }
2057                         i915_request_get(rq[n + 1]);
2058
2059                         err = i915_request_await_execution(rq[n + 1],
2060                                                            &rq[0]->fence,
2061                                                            ve->engine->bond_execute);
2062                         i915_request_add(rq[n + 1]);
2063                         if (err < 0) {
2064                                 onstack_fence_fini(&fence);
2065                                 goto out;
2066                         }
2067                 }
2068                 onstack_fence_fini(&fence);
2069
2070                 if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
2071                         pr_err("Master request did not execute (on %s)!\n",
2072                                rq[0]->engine->name);
2073                         err = -EIO;
2074                         goto out;
2075                 }
2076
2077                 for (n = 0; n < nsibling; n++) {
2078                         if (i915_request_wait(rq[n + 1], 0,
2079                                               MAX_SCHEDULE_TIMEOUT) < 0) {
2080                                 err = -EIO;
2081                                 goto out;
2082                         }
2083
2084                         if (rq[n + 1]->engine != siblings[n]) {
2085                                 pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
2086                                        siblings[n]->name,
2087                                        rq[n + 1]->engine->name,
2088                                        rq[0]->engine->name);
2089                                 err = -EINVAL;
2090                                 goto out;
2091                         }
2092                 }
2093
2094                 for (n = 0; !IS_ERR(rq[n]); n++)
2095                         i915_request_put(rq[n]);
2096                 rq[0] = ERR_PTR(-ENOMEM);
2097         }
2098
2099 out:
2100         for (n = 0; !IS_ERR(rq[n]); n++)
2101                 i915_request_put(rq[n]);
2102         if (igt_flush_test(i915, I915_WAIT_LOCKED))
2103                 err = -EIO;
2104
2105         kernel_context_close(ctx);
2106         return err;
2107 }
2108
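/*
 * Run the bonding test for every engine class with multiple instances, both
 * with immediate submission and with the master held back behind an on-stack
 * fence (BOND_SCHEDULE).
 */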
2109 static int live_virtual_bond(void *arg)
2110 {
2111         static const struct phase {
2112                 const char *name;
2113                 unsigned int flags;
2114         } phases[] = {
2115                 { "", 0 },
2116                 { "schedule", BOND_SCHEDULE },
2117                 { },
2118         };
2119         struct drm_i915_private *i915 = arg;
2120         struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
2121         struct intel_gt *gt = &i915->gt;
2122         unsigned int class, inst;
2123         int err = 0;
2124
2125         if (USES_GUC_SUBMISSION(i915))
2126                 return 0;
2127
2128         mutex_lock(&i915->drm.struct_mutex);
2129
2130         for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
2131                 const struct phase *p;
2132                 int nsibling;
2133
2134                 nsibling = 0;
2135                 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
2136                         if (!gt->engine_class[class][inst])
2137                                 break;
2138
2139                         GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
2140                         siblings[nsibling++] = gt->engine_class[class][inst];
2141                 }
2142                 if (nsibling < 2)
2143                         continue;
2144
2145                 for (p = phases; p->name; p++) {
2146                         err = bond_virtual_engine(i915,
2147                                                   class, siblings, nsibling,
2148                                                   p->flags);
2149                         if (err) {
2150                                 pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
2151                                        __func__, p->name, class, nsibling, err);
2152                                 goto out_unlock;
2153                         }
2154                 }
2155         }
2156
2157 out_unlock:
2158         mutex_unlock(&i915->drm.struct_mutex);
2159         return err;
2160 }
2161
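/*
 * Entry point for the execlists live selftests; skipped when execlists are
 * unsupported or the GT is already wedged.
 */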
2162 int intel_execlists_live_selftests(struct drm_i915_private *i915)
2163 {
2164         static const struct i915_subtest tests[] = {
2165                 SUBTEST(live_sanitycheck),
2166                 SUBTEST(live_timeslice_preempt),
2167                 SUBTEST(live_busywait_preempt),
2168                 SUBTEST(live_preempt),
2169                 SUBTEST(live_late_preempt),
2170                 SUBTEST(live_nopreempt),
2171                 SUBTEST(live_suppress_self_preempt),
2172                 SUBTEST(live_suppress_wait_preempt),
2173                 SUBTEST(live_chain_preempt),
2174                 SUBTEST(live_preempt_hang),
2175                 SUBTEST(live_preempt_smoke),
2176                 SUBTEST(live_virtual_engine),
2177                 SUBTEST(live_virtual_mask),
2178                 SUBTEST(live_virtual_bond),
2179         };
2180
2181         if (!HAS_EXECLISTS(i915))
2182                 return 0;
2183
2184         if (intel_gt_is_wedged(&i915->gt))
2185                 return 0;
2186
2187         return i915_live_subtests(tests, i915);
2188 }