2 * SPDX-License-Identifier: MIT
4 * Copyright © 2018 Intel Corporation
7 #include <linux/prime_numbers.h>
9 #include "gem/i915_gem_pm.h"
10 #include "gt/intel_reset.h"
12 #include "i915_selftest.h"
13 #include "selftests/i915_random.h"
14 #include "selftests/igt_flush_test.h"
15 #include "selftests/igt_live_test.h"
16 #include "selftests/igt_spinner.h"
17 #include "selftests/lib_sw_fence.h"
19 #include "gem/selftests/igt_gem_utils.h"
20 #include "gem/selftests/mock_context.h"
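/*
 * live_sanitycheck: submit a spinning batch on every engine of a kernel
 * context and check that each spinner actually starts executing; any
 * failure wedges the GT.
 */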
22 static int live_sanitycheck(void *arg)
24 struct drm_i915_private *i915 = arg;
25 struct i915_gem_engines_iter it;
26 struct i915_gem_context *ctx;
27 struct intel_context *ce;
28 struct igt_spinner spin;
29 intel_wakeref_t wakeref;
32 if (!HAS_LOGICAL_RING_CONTEXTS(i915))
35 mutex_lock(&i915->drm.struct_mutex);
36 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
38 if (igt_spinner_init(&spin, &i915->gt))
41 ctx = kernel_context(i915);
45 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
46 struct i915_request *rq;
48 rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
55 if (!igt_wait_for_spinner(&spin, rq)) {
56 GEM_TRACE("spinner failed to start\n");
58 intel_gt_set_wedged(&i915->gt);
63 igt_spinner_end(&spin);
64 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
72 i915_gem_context_unlock_engines(ctx);
73 kernel_context_close(ctx);
75 igt_spinner_fini(&spin);
77 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
78 mutex_unlock(&i915->drm.struct_mutex);
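/*
 * Emit one link of a semaphore chain: poll dword @idx of @vma until
 * another request writes to it, then release dword @idx - 1 so the
 * previous link can make progress. Arbitration is enabled around the
 * wait so the request stays preemptible while it spins.
 */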
83 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
87 cs = intel_ring_begin(rq, 10);
91 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
93 *cs++ = MI_SEMAPHORE_WAIT |
94 MI_SEMAPHORE_GLOBAL_GTT |
96 MI_SEMAPHORE_SAD_NEQ_SDD;
98 *cs++ = i915_ggtt_offset(vma) + 4 * idx;
102 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
103 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
113 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
115 intel_ring_advance(rq, cs);
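/*
 * Submit one link of the semaphore chain on @engine, using a throwaway
 * kernel context.
 */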
119 static struct i915_request *
120 semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
122 struct i915_gem_context *ctx;
123 struct i915_request *rq;
126 ctx = kernel_context(engine->i915);
128 return ERR_PTR(-ENOMEM);
130 rq = igt_request_alloc(ctx, engine);
134 err = emit_semaphore_chain(rq, vma, idx);
135 i915_request_add(rq);
140 kernel_context_close(ctx);
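/*
 * From @engine's kernel context, submit a maximum-priority request that
 * writes to dword @idx - 1, releasing the tail of the semaphore chain
 * and forcing the scheduler to re-evaluate its queues.
 */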
145 release_queue(struct intel_engine_cs *engine,
146 struct i915_vma *vma,
149 struct i915_sched_attr attr = {
150 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
152 struct i915_request *rq;
155 rq = i915_request_create(engine->kernel_context);
159 cs = intel_ring_begin(rq, 4);
161 i915_request_add(rq);
165 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
166 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
170 intel_ring_advance(rq, cs);
171 i915_request_add(rq);
173 engine->schedule(rq, &attr);
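/*
 * Build a chain of semaphore busywaits: the head on @outer, then @count
 * links on every engine, finally released by a maximum-priority write
 * from @outer. The head can only complete if each engine timeslices
 * between the links stacked up on it; if it does not, the wait below
 * times out and the GT is wedged.
 */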
179 slice_semaphore_queue(struct intel_engine_cs *outer,
180 struct i915_vma *vma,
183 struct intel_engine_cs *engine;
184 struct i915_request *head;
185 enum intel_engine_id id;
188 head = semaphore_queue(outer, vma, n++);
190 return PTR_ERR(head);
192 i915_request_get(head);
193 for_each_engine(engine, outer->i915, id) {
194 for (i = 0; i < count; i++) {
195 struct i915_request *rq;
197 rq = semaphore_queue(engine, vma, n++);
205 err = release_queue(outer, vma, n);
209 if (i915_request_wait(head,
211 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
212 pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
215 intel_gt_set_wedged(outer->gt);
220 i915_request_put(head);
224 static int live_timeslice_preempt(void *arg)
226 struct drm_i915_private *i915 = arg;
227 struct drm_i915_gem_object *obj;
228 intel_wakeref_t wakeref;
229 struct i915_vma *vma;
235 * If a request takes too long, we would like to give other users
236 * a fair go on the GPU. In particular, users may create batches
237 * that wait upon external input, where that input may even be
238 * supplied by another GPU job. To avoid blocking forever, we
239 * need to preempt the current task and replace it with another
243 mutex_lock(&i915->drm.struct_mutex);
244 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
246 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
252 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
258 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
260 err = PTR_ERR(vaddr);
264 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
268 for_each_prime_number_from(count, 1, 16) {
269 struct intel_engine_cs *engine;
270 enum intel_engine_id id;
272 for_each_engine(engine, i915, id) {
273 if (!intel_engine_has_preemption(engine))
276 memset(vaddr, 0, PAGE_SIZE);
278 err = slice_semaphore_queue(engine, vma, count);
282 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
292 i915_gem_object_unpin_map(obj);
294 i915_gem_object_put(obj);
296 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
297 mutex_unlock(&i915->drm.struct_mutex);
302 static int live_busywait_preempt(void *arg)
304 struct drm_i915_private *i915 = arg;
305 struct i915_gem_context *ctx_hi, *ctx_lo;
306 struct intel_engine_cs *engine;
307 struct drm_i915_gem_object *obj;
308 struct i915_vma *vma;
309 enum intel_engine_id id;
310 intel_wakeref_t wakeref;
315 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
316 * preempt the busywaits used to synchronise between rings.
319 mutex_lock(&i915->drm.struct_mutex);
320 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
322 ctx_hi = kernel_context(i915);
325 ctx_hi->sched.priority =
326 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
328 ctx_lo = kernel_context(i915);
331 ctx_lo->sched.priority =
332 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
334 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
340 map = i915_gem_object_pin_map(obj, I915_MAP_WC);
346 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
352 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
356 for_each_engine(engine, i915, id) {
357 struct i915_request *lo, *hi;
358 struct igt_live_test t;
361 if (!intel_engine_has_preemption(engine))
364 if (!intel_engine_can_store_dword(engine))
367 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
373 * We create two requests. The low priority request
374 * busywaits on a semaphore (inside the ringbuffer where
375 it should be preemptible) and the high priority request
376 * uses a MI_STORE_DWORD_IMM to update the semaphore value
377 * allowing the first request to complete. If preemption
378 * fails, we hang instead.
381 lo = igt_request_alloc(ctx_lo, engine);
387 cs = intel_ring_begin(lo, 8);
390 i915_request_add(lo);
394 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
395 *cs++ = i915_ggtt_offset(vma);
399 /* XXX Do we need a flush + invalidate here? */
401 *cs++ = MI_SEMAPHORE_WAIT |
402 MI_SEMAPHORE_GLOBAL_GTT |
404 MI_SEMAPHORE_SAD_EQ_SDD;
406 *cs++ = i915_ggtt_offset(vma);
409 intel_ring_advance(lo, cs);
410 i915_request_add(lo);
412 if (wait_for(READ_ONCE(*map), 10)) {
417 /* Low priority request should be busywaiting now */
418 if (i915_request_wait(lo, 0, 1) != -ETIME) {
419 pr_err("%s: Busywaiting request did not busywait!\n",
425 hi = igt_request_alloc(ctx_hi, engine);
431 cs = intel_ring_begin(hi, 4);
434 i915_request_add(hi);
438 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
439 *cs++ = i915_ggtt_offset(vma);
443 intel_ring_advance(hi, cs);
444 i915_request_add(hi);
446 if (i915_request_wait(lo, 0, HZ / 5) < 0) {
447 struct drm_printer p = drm_info_printer(i915->drm.dev);
449 pr_err("%s: Failed to preempt semaphore busywait!\n",
452 intel_engine_dump(engine, &p, "%s\n", engine->name);
455 intel_gt_set_wedged(&i915->gt);
459 GEM_BUG_ON(READ_ONCE(*map));
461 if (igt_live_test_end(&t)) {
471 i915_gem_object_unpin_map(obj);
473 i915_gem_object_put(obj);
475 kernel_context_close(ctx_lo);
477 kernel_context_close(ctx_hi);
479 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
480 mutex_unlock(&i915->drm.struct_mutex);
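/*
 * Helper to build a spinner request on the intel_context backing
 * @engine within @ctx.
 */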
484 static struct i915_request *
485 spinner_create_request(struct igt_spinner *spin,
486 struct i915_gem_context *ctx,
487 struct intel_engine_cs *engine,
490 struct intel_context *ce;
491 struct i915_request *rq;
493 ce = i915_gem_context_get_engine(ctx, engine->id);
497 rq = igt_spinner_create_request(spin, ce, arb);
498 intel_context_put(ce);
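/*
 * live_preempt: start a spinner from a minimum-priority context, then
 * submit a spinner from a maximum-priority context and check that the
 * latter preempts and starts running while the first is still spinning.
 */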
502 static int live_preempt(void *arg)
504 struct drm_i915_private *i915 = arg;
505 struct i915_gem_context *ctx_hi, *ctx_lo;
506 struct igt_spinner spin_hi, spin_lo;
507 struct intel_engine_cs *engine;
508 enum intel_engine_id id;
509 intel_wakeref_t wakeref;
512 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
515 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
516 pr_err("Logical preemption supported, but not exposed\n");
518 mutex_lock(&i915->drm.struct_mutex);
519 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
521 if (igt_spinner_init(&spin_hi, &i915->gt))
524 if (igt_spinner_init(&spin_lo, &i915->gt))
527 ctx_hi = kernel_context(i915);
530 ctx_hi->sched.priority =
531 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
533 ctx_lo = kernel_context(i915);
536 ctx_lo->sched.priority =
537 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
539 for_each_engine(engine, i915, id) {
540 struct igt_live_test t;
541 struct i915_request *rq;
543 if (!intel_engine_has_preemption(engine))
546 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
551 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
558 i915_request_add(rq);
559 if (!igt_wait_for_spinner(&spin_lo, rq)) {
560 GEM_TRACE("lo spinner failed to start\n");
562 intel_gt_set_wedged(&i915->gt);
567 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
570 igt_spinner_end(&spin_lo);
575 i915_request_add(rq);
576 if (!igt_wait_for_spinner(&spin_hi, rq)) {
577 GEM_TRACE("hi spinner failed to start\n");
579 intel_gt_set_wedged(&i915->gt);
584 igt_spinner_end(&spin_hi);
585 igt_spinner_end(&spin_lo);
587 if (igt_live_test_end(&t)) {
595 kernel_context_close(ctx_lo);
597 kernel_context_close(ctx_hi);
599 igt_spinner_fini(&spin_lo);
601 igt_spinner_fini(&spin_hi);
603 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
604 mutex_unlock(&i915->drm.struct_mutex);
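/*
 * live_late_preempt: both spinners are submitted at low priority so the
 * second queues behind the first; only after its priority is bumped via
 * engine->schedule() should it preempt the first spinner.
 */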
608 static int live_late_preempt(void *arg)
610 struct drm_i915_private *i915 = arg;
611 struct i915_gem_context *ctx_hi, *ctx_lo;
612 struct igt_spinner spin_hi, spin_lo;
613 struct intel_engine_cs *engine;
614 struct i915_sched_attr attr = {};
615 enum intel_engine_id id;
616 intel_wakeref_t wakeref;
619 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
622 mutex_lock(&i915->drm.struct_mutex);
623 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
625 if (igt_spinner_init(&spin_hi, &i915->gt))
628 if (igt_spinner_init(&spin_lo, &i915->gt))
631 ctx_hi = kernel_context(i915);
635 ctx_lo = kernel_context(i915);
639 /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
640 ctx_lo->sched.priority = I915_USER_PRIORITY(1);
642 for_each_engine(engine, i915, id) {
643 struct igt_live_test t;
644 struct i915_request *rq;
646 if (!intel_engine_has_preemption(engine))
649 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
654 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
661 i915_request_add(rq);
662 if (!igt_wait_for_spinner(&spin_lo, rq)) {
663 pr_err("First context failed to start\n");
667 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
670 igt_spinner_end(&spin_lo);
675 i915_request_add(rq);
676 if (igt_wait_for_spinner(&spin_hi, rq)) {
677 pr_err("Second context overtook first?\n");
681 attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
682 engine->schedule(rq, &attr);
684 if (!igt_wait_for_spinner(&spin_hi, rq)) {
685 pr_err("High priority context failed to preempt the low priority context\n");
690 igt_spinner_end(&spin_hi);
691 igt_spinner_end(&spin_lo);
693 if (igt_live_test_end(&t)) {
701 kernel_context_close(ctx_lo);
703 kernel_context_close(ctx_hi);
705 igt_spinner_fini(&spin_lo);
707 igt_spinner_fini(&spin_hi);
709 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
710 mutex_unlock(&i915->drm.struct_mutex);
714 igt_spinner_end(&spin_hi);
715 igt_spinner_end(&spin_lo);
716 intel_gt_set_wedged(&i915->gt);
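/*
 * A preempt_client pairs a private context with its own spinner so that
 * competing clients can be spun up and torn down independently.
 */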
721 struct preempt_client {
722 struct igt_spinner spin;
723 struct i915_gem_context *ctx;
726 static int preempt_client_init(struct drm_i915_private *i915,
727 struct preempt_client *c)
729 c->ctx = kernel_context(i915);
733 if (igt_spinner_init(&c->spin, &i915->gt))
739 kernel_context_close(c->ctx);
743 static void preempt_client_fini(struct preempt_client *c)
745 igt_spinner_fini(&c->spin);
746 kernel_context_close(c->ctx);
749 static int live_nopreempt(void *arg)
751 struct drm_i915_private *i915 = arg;
752 struct intel_engine_cs *engine;
753 struct preempt_client a, b;
754 enum intel_engine_id id;
755 intel_wakeref_t wakeref;
759 * Verify that we can disable preemption for an individual request
760 * that may be being observed and does not want to be interrupted.
763 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
766 mutex_lock(&i915->drm.struct_mutex);
767 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
769 if (preempt_client_init(i915, &a))
771 if (preempt_client_init(i915, &b))
773 b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
775 for_each_engine(engine, i915, id) {
776 struct i915_request *rq_a, *rq_b;
778 if (!intel_engine_has_preemption(engine))
781 engine->execlists.preempt_hang.count = 0;
783 rq_a = spinner_create_request(&a.spin,
791 /* Low priority client, but unpreemptable! */
792 rq_a->flags |= I915_REQUEST_NOPREEMPT;
794 i915_request_add(rq_a);
795 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
796 pr_err("First client failed to start\n");
800 rq_b = spinner_create_request(&b.spin,
808 i915_request_add(rq_b);
810 /* B is much more important than A! (But A is unpreemptable.) */
811 GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
813 /* Wait long enough for preemption and timeslicing */
814 if (igt_wait_for_spinner(&b.spin, rq_b)) {
815 pr_err("Second client started too early!\n");
819 igt_spinner_end(&a.spin);
821 if (!igt_wait_for_spinner(&b.spin, rq_b)) {
822 pr_err("Second client failed to start\n");
826 igt_spinner_end(&b.spin);
828 if (engine->execlists.preempt_hang.count) {
829 pr_err("Preemption recorded x%d; should have been suppressed!\n",
830 engine->execlists.preempt_hang.count);
835 if (igt_flush_test(i915, I915_WAIT_LOCKED))
841 preempt_client_fini(&b);
843 preempt_client_fini(&a);
845 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
846 mutex_unlock(&i915->drm.struct_mutex);
850 igt_spinner_end(&b.spin);
851 igt_spinner_end(&a.spin);
852 intel_gt_set_wedged(&i915->gt);
857 static int live_suppress_self_preempt(void *arg)
859 struct drm_i915_private *i915 = arg;
860 struct intel_engine_cs *engine;
861 struct i915_sched_attr attr = {
862 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
864 struct preempt_client a, b;
865 enum intel_engine_id id;
866 intel_wakeref_t wakeref;
870 * Verify that if a preemption request does not cause a change in
871 * the current execution order, the preempt-to-idle injection is
872 * skipped and that we do not accidentally apply it after the CS
876 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
879 if (USES_GUC_SUBMISSION(i915))
880 return 0; /* presume black box */
882 if (intel_vgpu_active(i915))
883 return 0; /* GVT forces single port & request submission */
885 mutex_lock(&i915->drm.struct_mutex);
886 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
888 if (preempt_client_init(i915, &a))
890 if (preempt_client_init(i915, &b))
893 for_each_engine(engine, i915, id) {
894 struct i915_request *rq_a, *rq_b;
897 if (!intel_engine_has_preemption(engine))
900 engine->execlists.preempt_hang.count = 0;
902 rq_a = spinner_create_request(&a.spin,
910 i915_request_add(rq_a);
911 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
912 pr_err("First client failed to start\n");
916 for (depth = 0; depth < 8; depth++) {
917 rq_b = spinner_create_request(&b.spin,
924 i915_request_add(rq_b);
926 GEM_BUG_ON(i915_request_completed(rq_a));
927 engine->schedule(rq_a, &attr);
928 igt_spinner_end(&a.spin);
930 if (!igt_wait_for_spinner(&b.spin, rq_b)) {
931 pr_err("Second client failed to start\n");
938 igt_spinner_end(&a.spin);
940 if (engine->execlists.preempt_hang.count) {
941 pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
942 engine->execlists.preempt_hang.count,
948 if (igt_flush_test(i915, I915_WAIT_LOCKED))
954 preempt_client_fini(&b);
956 preempt_client_fini(&a);
958 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
959 mutex_unlock(&i915->drm.struct_mutex);
963 igt_spinner_end(&b.spin);
964 igt_spinner_end(&a.spin);
965 intel_gt_set_wedged(&i915->gt);
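/*
 * Hand-roll a minimal request that can never complete by itself: its
 * breadcrumb pointer targets the always-zero upper half of the 64b
 * fence seqno. dummy_request_free() later fakes the CS event, signals
 * the fence and releases it.
 */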
970 static int __i915_sw_fence_call
971 dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
976 static struct i915_request *dummy_request(struct intel_engine_cs *engine)
978 struct i915_request *rq;
980 rq = kzalloc(sizeof(*rq), GFP_KERNEL);
984 INIT_LIST_HEAD(&rq->active_list);
987 i915_sched_node_init(&rq->sched);
989 /* mark this request as permanently incomplete */
991 BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
992 rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
993 GEM_BUG_ON(i915_request_completed(rq));
995 i915_sw_fence_init(&rq->submit, dummy_notify);
996 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
998 spin_lock_init(&rq->lock);
999 rq->fence.lock = &rq->lock;
1000 INIT_LIST_HEAD(&rq->fence.cb_list);
1005 static void dummy_request_free(struct i915_request *dummy)
1007 /* We have to fake the CS interrupt to kick the next request */
1008 i915_sw_fence_commit(&dummy->submit);
1010 i915_request_mark_complete(dummy);
1011 dma_fence_signal(&dummy->fence);
1013 i915_sched_node_fini(&dummy->sched);
1014 i915_sw_fence_fini(&dummy->submit);
1016 dma_fence_free(&dummy->fence);
1019 static int live_suppress_wait_preempt(void *arg)
1021 struct drm_i915_private *i915 = arg;
1022 struct preempt_client client[4];
1023 struct intel_engine_cs *engine;
1024 enum intel_engine_id id;
1025 intel_wakeref_t wakeref;
1030 * Waiters are given a little priority nudge, but not enough
1031 * to actually cause any preemption. Double check that we do
1032 * not needlessly generate preempt-to-idle cycles.
1035 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
1038 mutex_lock(&i915->drm.struct_mutex);
1039 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1041 if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
1043 if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
1045 if (preempt_client_init(i915, &client[2])) /* head of queue */
1047 if (preempt_client_init(i915, &client[3])) /* bystander */
1050 for_each_engine(engine, i915, id) {
1053 if (!intel_engine_has_preemption(engine))
1056 if (!engine->emit_init_breadcrumb)
1059 for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
1060 struct i915_request *rq[ARRAY_SIZE(client)];
1061 struct i915_request *dummy;
1063 engine->execlists.preempt_hang.count = 0;
1065 dummy = dummy_request(engine);
1069 for (i = 0; i < ARRAY_SIZE(client); i++) {
1070 rq[i] = spinner_create_request(&client[i].spin,
1071 client[i].ctx, engine,
1073 if (IS_ERR(rq[i])) {
1074 err = PTR_ERR(rq[i]);
1078 /* Disable NEWCLIENT promotion */
1079 __i915_active_request_set(&rq[i]->timeline->last_request,
1081 i915_request_add(rq[i]);
1084 dummy_request_free(dummy);
1086 GEM_BUG_ON(i915_request_completed(rq[0]));
1087 if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
1088 pr_err("%s: First client failed to start\n",
1092 GEM_BUG_ON(!i915_request_started(rq[0]));
1094 if (i915_request_wait(rq[depth],
1097 pr_err("%s: Waiter depth:%d completed!\n",
1098 engine->name, depth);
1102 for (i = 0; i < ARRAY_SIZE(client); i++)
1103 igt_spinner_end(&client[i].spin);
1105 if (igt_flush_test(i915, I915_WAIT_LOCKED))
1108 if (engine->execlists.preempt_hang.count) {
1109 pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
1111 engine->execlists.preempt_hang.count,
1121 preempt_client_fini(&client[3]);
1123 preempt_client_fini(&client[2]);
1125 preempt_client_fini(&client[1]);
1127 preempt_client_fini(&client[0]);
1129 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1130 mutex_unlock(&i915->drm.struct_mutex);
1134 for (i = 0; i < ARRAY_SIZE(client); i++)
1135 igt_spinner_end(&client[i].spin);
1136 intel_gt_set_wedged(&i915->gt);
1141 static int live_chain_preempt(void *arg)
1143 struct drm_i915_private *i915 = arg;
1144 struct intel_engine_cs *engine;
1145 struct preempt_client hi, lo;
1146 enum intel_engine_id id;
1147 intel_wakeref_t wakeref;
1151 * Build a chain AB...BA between two contexts (A, B) and request
1152 * preemption of the last request. It should then complete before
1153 * the previously submitted spinner in B.
1156 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
1159 mutex_lock(&i915->drm.struct_mutex);
1160 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1162 if (preempt_client_init(i915, &hi))
1165 if (preempt_client_init(i915, &lo))
1168 for_each_engine(engine, i915, id) {
1169 struct i915_sched_attr attr = {
1170 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
1172 struct igt_live_test t;
1173 struct i915_request *rq;
1174 int ring_size, count, i;
1176 if (!intel_engine_has_preemption(engine))
1179 rq = spinner_create_request(&lo.spin,
1184 i915_request_add(rq);
1186 ring_size = rq->wa_tail - rq->head;
1188 ring_size += rq->ring->size;
1189 ring_size = rq->ring->size / ring_size;
1190 pr_debug("%s(%s): Using maximum of %d requests\n",
1191 __func__, engine->name, ring_size);
1193 igt_spinner_end(&lo.spin);
1194 if (i915_request_wait(rq, 0, HZ / 2) < 0) {
1195 pr_err("Timed out waiting to flush %s\n", engine->name);
1199 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
1204 for_each_prime_number_from(count, 1, ring_size) {
1205 rq = spinner_create_request(&hi.spin,
1210 i915_request_add(rq);
1211 if (!igt_wait_for_spinner(&hi.spin, rq))
1214 rq = spinner_create_request(&lo.spin,
1219 i915_request_add(rq);
1221 for (i = 0; i < count; i++) {
1222 rq = igt_request_alloc(lo.ctx, engine);
1225 i915_request_add(rq);
1228 rq = igt_request_alloc(hi.ctx, engine);
1231 i915_request_add(rq);
1232 engine->schedule(rq, &attr);
1234 igt_spinner_end(&hi.spin);
1235 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1236 struct drm_printer p =
1237 drm_info_printer(i915->drm.dev);
1239 pr_err("Failed to preempt over chain of %d\n",
1241 intel_engine_dump(engine, &p,
1242 "%s\n", engine->name);
1245 igt_spinner_end(&lo.spin);
1247 rq = igt_request_alloc(lo.ctx, engine);
1250 i915_request_add(rq);
1251 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1252 struct drm_printer p =
1253 drm_info_printer(i915->drm.dev);
1255 pr_err("Failed to flush low priority chain of %d requests\n",
1257 intel_engine_dump(engine, &p,
1258 "%s\n", engine->name);
1263 if (igt_live_test_end(&t)) {
1271 preempt_client_fini(&lo);
1273 preempt_client_fini(&hi);
1275 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1276 mutex_unlock(&i915->drm.struct_mutex);
1280 igt_spinner_end(&hi.spin);
1281 igt_spinner_end(&lo.spin);
1282 intel_gt_set_wedged(&i915->gt);
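/*
 * live_preempt_hang: arm preempt_hang.inject_hang so that the attempt
 * to preempt the low priority spinner hangs, wait for the hang to be
 * reported, recover with a per-engine reset, and then check that the
 * high priority spinner still starts.
 */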
1287 static int live_preempt_hang(void *arg)
1289 struct drm_i915_private *i915 = arg;
1290 struct i915_gem_context *ctx_hi, *ctx_lo;
1291 struct igt_spinner spin_hi, spin_lo;
1292 struct intel_engine_cs *engine;
1293 enum intel_engine_id id;
1294 intel_wakeref_t wakeref;
1297 if (!HAS_LOGICAL_RING_PREEMPTION(i915))
1300 if (!intel_has_reset_engine(i915))
1303 mutex_lock(&i915->drm.struct_mutex);
1304 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1306 if (igt_spinner_init(&spin_hi, &i915->gt))
1309 if (igt_spinner_init(&spin_lo, &i915->gt))
1312 ctx_hi = kernel_context(i915);
1315 ctx_hi->sched.priority =
1316 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
1318 ctx_lo = kernel_context(i915);
1321 ctx_lo->sched.priority =
1322 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
1324 for_each_engine(engine, i915, id) {
1325 struct i915_request *rq;
1327 if (!intel_engine_has_preemption(engine))
1330 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
1337 i915_request_add(rq);
1338 if (!igt_wait_for_spinner(&spin_lo, rq)) {
1339 GEM_TRACE("lo spinner failed to start\n");
1341 intel_gt_set_wedged(&i915->gt);
1346 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
1349 igt_spinner_end(&spin_lo);
1354 init_completion(&engine->execlists.preempt_hang.completion);
1355 engine->execlists.preempt_hang.inject_hang = true;
1357 i915_request_add(rq);
1359 if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
1361 pr_err("Preemption did not occur within timeout!\n");
1363 intel_gt_set_wedged(&i915->gt);
1368 set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
1369 intel_engine_reset(engine, NULL);
1370 clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
1372 engine->execlists.preempt_hang.inject_hang = false;
1374 if (!igt_wait_for_spinner(&spin_hi, rq)) {
1375 GEM_TRACE("hi spinner failed to start\n");
1377 intel_gt_set_wedged(&i915->gt);
1382 igt_spinner_end(&spin_hi);
1383 igt_spinner_end(&spin_lo);
1384 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
1392 kernel_context_close(ctx_lo);
1394 kernel_context_close(ctx_hi);
1396 igt_spinner_fini(&spin_lo);
1398 igt_spinner_fini(&spin_hi);
1400 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1401 mutex_unlock(&i915->drm.struct_mutex);
1405 static int random_range(struct rnd_state *rnd, int min, int max)
1407 return i915_prandom_u32_max_state(max - min, rnd) + min;
1410 static int random_priority(struct rnd_state *rnd)
1412 return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
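/*
 * State shared by the preemption smoketests: a pool of contexts, an
 * optional batch object and a PRNG for picking contexts and priorities.
 */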
1415 struct preempt_smoke {
1416 struct drm_i915_private *i915;
1417 struct i915_gem_context **contexts;
1418 struct intel_engine_cs *engine;
1419 struct drm_i915_gem_object *batch;
1420 unsigned int ncontext;
1421 struct rnd_state prng;
1422 unsigned long count;
1425 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
1427 return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
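/*
 * Submit a single request from @ctx at priority @prio; if @batch is
 * given it is pinned into the context's VM and executed, otherwise the
 * request carries no payload and retires almost immediately.
 */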
1431 static int smoke_submit(struct preempt_smoke *smoke,
1432 struct i915_gem_context *ctx, int prio,
1433 struct drm_i915_gem_object *batch)
1435 struct i915_request *rq;
1436 struct i915_vma *vma = NULL;
1440 vma = i915_vma_instance(batch, ctx->vm, NULL);
1442 return PTR_ERR(vma);
1444 err = i915_vma_pin(vma, 0, 0, PIN_USER);
1449 ctx->sched.priority = prio;
1451 rq = igt_request_alloc(ctx, smoke->engine);
1459 err = rq->engine->emit_bb_start(rq,
1463 err = i915_vma_move_to_active(vma, rq, 0);
1464 i915_vma_unlock(vma);
1467 i915_request_add(rq);
1471 i915_vma_unpin(vma);
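/*
 * Crescendo: spawn one kthread per engine, each repeatedly submitting
 * requests whose priority climbs and wraps, so preemption decisions are
 * exercised continuously until the timeout expires.
 */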
1476 static int smoke_crescendo_thread(void *arg)
1478 struct preempt_smoke *smoke = arg;
1479 IGT_TIMEOUT(end_time);
1480 unsigned long count;
1484 struct i915_gem_context *ctx = smoke_context(smoke);
1487 mutex_lock(&smoke->i915->drm.struct_mutex);
1488 err = smoke_submit(smoke,
1489 ctx, count % I915_PRIORITY_MAX,
1491 mutex_unlock(&smoke->i915->drm.struct_mutex);
1496 } while (!__igt_timeout(end_time, NULL));
1498 smoke->count = count;
1502 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
1503 #define BATCH BIT(0)
1505 struct task_struct *tsk[I915_NUM_ENGINES] = {};
1506 struct preempt_smoke arg[I915_NUM_ENGINES];
1507 struct intel_engine_cs *engine;
1508 enum intel_engine_id id;
1509 unsigned long count;
1512 mutex_unlock(&smoke->i915->drm.struct_mutex);
1514 for_each_engine(engine, smoke->i915, id) {
1516 arg[id].engine = engine;
1517 if (!(flags & BATCH))
1518 arg[id].batch = NULL;
1521 tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
1522 "igt/smoke:%d", id);
1523 if (IS_ERR(tsk[id])) {
1524 err = PTR_ERR(tsk[id]);
1527 get_task_struct(tsk[id]);
1531 for_each_engine(engine, smoke->i915, id) {
1534 if (IS_ERR_OR_NULL(tsk[id]))
1537 status = kthread_stop(tsk[id]);
1541 count += arg[id].count;
1543 put_task_struct(tsk[id]);
1546 mutex_lock(&smoke->i915->drm.struct_mutex);
1548 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
1550 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
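/*
 * Random: a single thread walks the engines round-robin, each iteration
 * submitting one request from a random context at a random priority.
 */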
1554 static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
1556 enum intel_engine_id id;
1557 IGT_TIMEOUT(end_time);
1558 unsigned long count;
1562 for_each_engine(smoke->engine, smoke->i915, id) {
1563 struct i915_gem_context *ctx = smoke_context(smoke);
1566 err = smoke_submit(smoke,
1567 ctx, random_priority(&smoke->prng),
1568 flags & BATCH ? smoke->batch : NULL);
1574 } while (!__igt_timeout(end_time, NULL));
1576 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
1578 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1582 static int live_preempt_smoke(void *arg)
1584 struct preempt_smoke smoke = {
1586 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
1589 const unsigned int phase[] = { 0, BATCH };
1590 intel_wakeref_t wakeref;
1591 struct igt_live_test t;
1596 if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
1599 smoke.contexts = kmalloc_array(smoke.ncontext,
1600 sizeof(*smoke.contexts),
1602 if (!smoke.contexts)
1605 mutex_lock(&smoke.i915->drm.struct_mutex);
1606 wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
1608 smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
1609 if (IS_ERR(smoke.batch)) {
1610 err = PTR_ERR(smoke.batch);
1614 cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
1619 for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
1620 cs[n] = MI_ARB_CHECK;
1621 cs[n] = MI_BATCH_BUFFER_END;
1622 i915_gem_object_flush_map(smoke.batch);
1623 i915_gem_object_unpin_map(smoke.batch);
1625 if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
1630 for (n = 0; n < smoke.ncontext; n++) {
1631 smoke.contexts[n] = kernel_context(smoke.i915);
1632 if (!smoke.contexts[n])
1636 for (n = 0; n < ARRAY_SIZE(phase); n++) {
1637 err = smoke_crescendo(&smoke, phase[n]);
1641 err = smoke_random(&smoke, phase[n]);
1647 if (igt_live_test_end(&t))
1650 for (n = 0; n < smoke.ncontext; n++) {
1651 if (!smoke.contexts[n])
1653 kernel_context_close(smoke.contexts[n]);
1657 i915_gem_object_put(smoke.batch);
1659 intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
1660 mutex_unlock(&smoke.i915->drm.struct_mutex);
1661 kfree(smoke.contexts);
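/*
 * Create @nctx virtual engines over @siblings and measure the latency
 * of submitting and retiring batches of empty requests, either queued
 * per context (CHAIN) or interleaved across the contexts.
 */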
1666 static int nop_virtual_engine(struct drm_i915_private *i915,
1667 struct intel_engine_cs **siblings,
1668 unsigned int nsibling,
1671 #define CHAIN BIT(0)
1673 IGT_TIMEOUT(end_time);
1674 struct i915_request *request[16];
1675 struct i915_gem_context *ctx[16];
1676 struct intel_context *ve[16];
1677 unsigned long n, prime, nc;
1678 struct igt_live_test t;
1679 ktime_t times[2] = {};
1682 GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
1684 for (n = 0; n < nctx; n++) {
1685 ctx[n] = kernel_context(i915);
1692 ve[n] = intel_execlists_create_virtual(ctx[n],
1693 siblings, nsibling);
1694 if (IS_ERR(ve[n])) {
1695 kernel_context_close(ctx[n]);
1696 err = PTR_ERR(ve[n]);
1701 err = intel_context_pin(ve[n]);
1703 intel_context_put(ve[n]);
1704 kernel_context_close(ctx[n]);
1710 err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
1714 for_each_prime_number_from(prime, 1, 8192) {
1715 times[1] = ktime_get_raw();
1717 if (flags & CHAIN) {
1718 for (nc = 0; nc < nctx; nc++) {
1719 for (n = 0; n < prime; n++) {
1721 i915_request_create(ve[nc]);
1722 if (IS_ERR(request[nc])) {
1723 err = PTR_ERR(request[nc]);
1727 i915_request_add(request[nc]);
1731 for (n = 0; n < prime; n++) {
1732 for (nc = 0; nc < nctx; nc++) {
1734 i915_request_create(ve[nc]);
1735 if (IS_ERR(request[nc])) {
1736 err = PTR_ERR(request[nc]);
1740 i915_request_add(request[nc]);
1745 for (nc = 0; nc < nctx; nc++) {
1746 if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
1747 pr_err("%s(%s): wait for %llx:%lld timed out\n",
1748 __func__, ve[0]->engine->name,
1749 request[nc]->fence.context,
1750 request[nc]->fence.seqno);
1752 GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1753 __func__, ve[0]->engine->name,
1754 request[nc]->fence.context,
1755 request[nc]->fence.seqno);
1757 intel_gt_set_wedged(&i915->gt);
1762 times[1] = ktime_sub(ktime_get_raw(), times[1]);
1764 times[0] = times[1];
1766 if (__igt_timeout(end_time, NULL))
1770 err = igt_live_test_end(&t);
1774 pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
1775 nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
1776 prime, div64_u64(ktime_to_ns(times[1]), prime));
1779 if (igt_flush_test(i915, I915_WAIT_LOCKED))
1782 for (nc = 0; nc < nctx; nc++) {
1783 intel_context_unpin(ve[nc]);
1784 intel_context_put(ve[nc]);
1785 kernel_context_close(ctx[nc]);
1790 static int live_virtual_engine(void *arg)
1792 struct drm_i915_private *i915 = arg;
1793 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1794 struct intel_engine_cs *engine;
1795 struct intel_gt *gt = &i915->gt;
1796 enum intel_engine_id id;
1797 unsigned int class, inst;
1800 if (USES_GUC_SUBMISSION(i915))
1803 mutex_lock(&i915->drm.struct_mutex);
1805 for_each_engine(engine, i915, id) {
1806 err = nop_virtual_engine(i915, &engine, 1, 1, 0);
1808 pr_err("Failed to wrap engine %s: err=%d\n",
1814 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1818 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1819 if (!gt->engine_class[class][inst])
1822 siblings[nsibling++] = gt->engine_class[class][inst];
1827 for (n = 1; n <= nsibling + 1; n++) {
1828 err = nop_virtual_engine(i915, siblings, nsibling,
1834 err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
1840 mutex_unlock(&i915->drm.struct_mutex);
1844 static int mask_virtual_engine(struct drm_i915_private *i915,
1845 struct intel_engine_cs **siblings,
1846 unsigned int nsibling)
1848 struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
1849 struct i915_gem_context *ctx;
1850 struct intel_context *ve;
1851 struct igt_live_test t;
1856 * Check that by setting the execution mask on a request, we can
1857 * restrict it to our desired engine within the virtual engine.
1860 ctx = kernel_context(i915);
1864 ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
1870 err = intel_context_pin(ve);
1874 err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
1878 for (n = 0; n < nsibling; n++) {
1879 request[n] = i915_request_create(ve);
1880 if (IS_ERR(request[n])) {
1881 err = PTR_ERR(request[n]);
1886 /* Reverse order as it's more likely to be unnatural */
1887 request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
1889 i915_request_get(request[n]);
1890 i915_request_add(request[n]);
1893 for (n = 0; n < nsibling; n++) {
1894 if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
1895 pr_err("%s(%s): wait for %llx:%lld timed out\n",
1896 __func__, ve->engine->name,
1897 request[n]->fence.context,
1898 request[n]->fence.seqno);
1900 GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1901 __func__, ve->engine->name,
1902 request[n]->fence.context,
1903 request[n]->fence.seqno);
1905 intel_gt_set_wedged(&i915->gt);
1910 if (request[n]->engine != siblings[nsibling - n - 1]) {
1911 pr_err("Executed on wrong sibling '%s', expected '%s'\n",
1912 request[n]->engine->name,
1913 siblings[nsibling - n - 1]->name);
1919 err = igt_live_test_end(&t);
1924 if (igt_flush_test(i915, I915_WAIT_LOCKED))
1927 for (n = 0; n < nsibling; n++)
1928 i915_request_put(request[n]);
1931 intel_context_unpin(ve);
1933 intel_context_put(ve);
1935 kernel_context_close(ctx);
1939 static int live_virtual_mask(void *arg)
1941 struct drm_i915_private *i915 = arg;
1942 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1943 struct intel_gt *gt = &i915->gt;
1944 unsigned int class, inst;
1947 if (USES_GUC_SUBMISSION(i915))
1950 mutex_lock(&i915->drm.struct_mutex);
1952 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1953 unsigned int nsibling;
1956 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1957 if (!gt->engine_class[class][inst])
1960 siblings[nsibling++] = gt->engine_class[class][inst];
1965 err = mask_virtual_engine(i915, siblings, nsibling);
1971 mutex_unlock(&i915->drm.struct_mutex);
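/*
 * For each engine outside @class, submit a master request plus a set of
 * bonded children on virtual engines built from @siblings, then verify
 * that every child executed on the sibling it was bonded to. With
 * BOND_SCHEDULE the master is held back by an onstack fence so that the
 * bonds are resolved when the requests are scheduled rather than only
 * once the master is already executing.
 */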
1975 static int bond_virtual_engine(struct drm_i915_private *i915,
1977 struct intel_engine_cs **siblings,
1978 unsigned int nsibling,
1980 #define BOND_SCHEDULE BIT(0)
1982 struct intel_engine_cs *master;
1983 struct i915_gem_context *ctx;
1984 struct i915_request *rq[16];
1985 enum intel_engine_id id;
1989 GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
1991 ctx = kernel_context(i915);
1996 rq[0] = ERR_PTR(-ENOMEM);
1997 for_each_engine(master, i915, id) {
1998 struct i915_sw_fence fence = {};
2000 if (master->class == class)
2003 memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
2005 rq[0] = igt_request_alloc(ctx, master);
2006 if (IS_ERR(rq[0])) {
2007 err = PTR_ERR(rq[0]);
2010 i915_request_get(rq[0]);
2012 if (flags & BOND_SCHEDULE) {
2013 onstack_fence_init(&fence);
2014 err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
2018 i915_request_add(rq[0]);
2022 for (n = 0; n < nsibling; n++) {
2023 struct intel_context *ve;
2025 ve = intel_execlists_create_virtual(ctx,
2030 onstack_fence_fini(&fence);
2034 err = intel_virtual_engine_attach_bond(ve->engine,
2038 intel_context_put(ve);
2039 onstack_fence_fini(&fence);
2043 err = intel_context_pin(ve);
2044 intel_context_put(ve);
2046 onstack_fence_fini(&fence);
2050 rq[n + 1] = i915_request_create(ve);
2051 intel_context_unpin(ve);
2052 if (IS_ERR(rq[n + 1])) {
2053 err = PTR_ERR(rq[n + 1]);
2054 onstack_fence_fini(&fence);
2057 i915_request_get(rq[n + 1]);
2059 err = i915_request_await_execution(rq[n + 1],
2061 ve->engine->bond_execute);
2062 i915_request_add(rq[n + 1]);
2064 onstack_fence_fini(&fence);
2068 onstack_fence_fini(&fence);
2070 if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
2071 pr_err("Master request did not execute (on %s)!\n",
2072 rq[0]->engine->name);
2077 for (n = 0; n < nsibling; n++) {
2078 if (i915_request_wait(rq[n + 1], 0,
2079 MAX_SCHEDULE_TIMEOUT) < 0) {
2084 if (rq[n + 1]->engine != siblings[n]) {
2085 pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
2087 rq[n + 1]->engine->name,
2088 rq[0]->engine->name);
2094 for (n = 0; !IS_ERR(rq[n]); n++)
2095 i915_request_put(rq[n]);
2096 rq[0] = ERR_PTR(-ENOMEM);
2100 for (n = 0; !IS_ERR(rq[n]); n++)
2101 i915_request_put(rq[n]);
2102 if (igt_flush_test(i915, I915_WAIT_LOCKED))
2105 kernel_context_close(ctx);
2109 static int live_virtual_bond(void *arg)
2111 static const struct phase {
2116 { "schedule", BOND_SCHEDULE },
2119 struct drm_i915_private *i915 = arg;
2120 struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
2121 struct intel_gt *gt = &i915->gt;
2122 unsigned int class, inst;
2125 if (USES_GUC_SUBMISSION(i915))
2128 mutex_lock(&i915->drm.struct_mutex);
2130 for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
2131 const struct phase *p;
2135 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
2136 if (!gt->engine_class[class][inst])
2139 GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
2140 siblings[nsibling++] = gt->engine_class[class][inst];
2145 for (p = phases; p->name; p++) {
2146 err = bond_virtual_engine(i915,
2147 class, siblings, nsibling,
2150 pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
2151 __func__, p->name, class, nsibling, err);
2158 mutex_unlock(&i915->drm.struct_mutex);
2162 int intel_execlists_live_selftests(struct drm_i915_private *i915)
2164 static const struct i915_subtest tests[] = {
2165 SUBTEST(live_sanitycheck),
2166 SUBTEST(live_timeslice_preempt),
2167 SUBTEST(live_busywait_preempt),
2168 SUBTEST(live_preempt),
2169 SUBTEST(live_late_preempt),
2170 SUBTEST(live_nopreempt),
2171 SUBTEST(live_suppress_self_preempt),
2172 SUBTEST(live_suppress_wait_preempt),
2173 SUBTEST(live_chain_preempt),
2174 SUBTEST(live_preempt_hang),
2175 SUBTEST(live_preempt_smoke),
2176 SUBTEST(live_virtual_engine),
2177 SUBTEST(live_virtual_mask),
2178 SUBTEST(live_virtual_bond),
2181 if (!HAS_EXECLISTS(i915))
2184 if (intel_gt_is_wedged(&i915->gt))
2187 return i915_live_subtests(tests, i915);