/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "../i915_reset.h"

#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_live_test.h"
#include "igt_spinner.h"
#include "i915_random.h"

#include "mock_context.h"
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int live_busywait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	u32 *map;

	/*
	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
	 * preempt the busywaits used to synchronise between rings.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_unlock;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_ctx_lo;
	}

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_map;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_map;
	for_each_engine(engine, i915, id) {
		struct i915_request *lo, *hi;
		struct igt_live_test t;
		u32 *cs;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_vma;
		}

		/*
		 * We create two requests. The low priority request
		 * busywaits on a semaphore (inside the ringbuffer where
		 * it should be preemptible) and the high priority request
		 * uses a MI_STORE_DWORD_IMM to update the semaphore value
		 * allowing the first request to complete. If preemption
		 * fails, we hang instead.
		 */

		lo = i915_request_alloc(engine, ctx_lo);
		if (IS_ERR(lo)) {
			err = PTR_ERR(lo);
			goto err_vma;
		}

		cs = intel_ring_begin(lo, 8);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(lo);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 1;

		/* XXX Do we need a flush + invalidate here? */

		*cs++ = MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_POLL |
			MI_SEMAPHORE_SAD_EQ_SDD;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;

		intel_ring_advance(lo, cs);
		i915_request_add(lo);
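		/*
		 * The low priority request has written 1 into the semaphore
		 * dword and should now be polling it from inside its ring,
		 * waiting for it to read back as 0 again. Only the high
		 * priority MI_STORE_DWORD_IMM below releases it, so until
		 * then the engine is busywaiting.
		 */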
		if (wait_for(READ_ONCE(*map), 10)) {
			err = -ETIMEDOUT;
			goto err_vma;
		}

		/* Low priority request should be busywaiting now */
		if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
			pr_err("%s: Busywaiting request did not!\n",
			       engine->name);
			err = -EIO;
			goto err_vma;
		}

		hi = i915_request_alloc(engine, ctx_hi);
		if (IS_ERR(hi)) {
			err = PTR_ERR(hi);
			goto err_vma;
		}

		cs = intel_ring_begin(hi, 4);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(hi);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 0;

		intel_ring_advance(hi, cs);
		i915_request_add(hi);

		if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
			struct drm_printer p = drm_info_printer(i915->drm.dev);

			pr_err("%s: Failed to preempt semaphore busywait!\n",
			       engine->name);
			intel_engine_dump(engine, &p, "%s\n", engine->name);
			GEM_TRACE_DUMP();

			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_vma;
		}

		/* The preempting store must have cleared the semaphore */
		GEM_BUG_ON(READ_ONCE(*map));
		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_vma;
		}
	}

	err = 0;
err_vma:
	i915_vma_unpin(vma);
err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
		pr_err("Logical preemption supported, but not exposed\n");

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}
struct preempt_client {
	struct igt_spinner spin;
	struct i915_gem_context *ctx;
};

static int preempt_client_init(struct drm_i915_private *i915,
			       struct preempt_client *c)
{
	c->ctx = kernel_context(i915);
	if (!c->ctx)
		return -ENOMEM;

	if (igt_spinner_init(&c->spin, i915))
		goto err_ctx;

	return 0;

err_ctx:
	kernel_context_close(c->ctx);
	return -ENOMEM;
}

static void preempt_client_fini(struct preempt_client *c)
{
	igt_spinner_fini(&c->spin);
	kernel_context_close(c->ctx);
}
static int live_suppress_self_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
	};
	struct preempt_client a, b;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Verify that if a preemption request does not cause a change in
	 * the current execution order, the preempt-to-idle injection is
	 * skipped and that we do not accidentally apply it after the CS
	 * completion event.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (USES_GUC_SUBMISSION(i915))
		return 0; /* presume black box */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &a))
		goto err_unlock;
	if (preempt_client_init(i915, &b))
		goto err_client_a;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq_a, *rq_b;
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		engine->execlists.preempt_hang.count = 0;

		rq_a = igt_spinner_create_request(&a.spin,
						  a.ctx, engine,
						  MI_NOOP);
		if (IS_ERR(rq_a)) {
			err = PTR_ERR(rq_a);
			goto err_client_b;
		}

		i915_request_add(rq_a);
		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
			pr_err("First client failed to start\n");
			goto err_wedged;
		}

		for (depth = 0; depth < 8; depth++) {
			rq_b = igt_spinner_create_request(&b.spin,
							  b.ctx, engine,
							  MI_NOOP);
			if (IS_ERR(rq_b)) {
				err = PTR_ERR(rq_b);
				goto err_client_b;
			}
			i915_request_add(rq_b);

			GEM_BUG_ON(i915_request_completed(rq_a));
			engine->schedule(rq_a, &attr);
			igt_spinner_end(&a.spin);

			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
				pr_err("Second client failed to start\n");
				goto err_wedged;
			}

			swap(a, b);
			rq_a = rq_b;
		}
		igt_spinner_end(&a.spin);

		if (engine->execlists.preempt_hang.count) {
			pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
			       engine->execlists.preempt_hang.count,
			       depth);
			err = -EINVAL;
			goto err_client_b;
		}

		if (igt_flush_test(i915, I915_WAIT_LOCKED))
			goto err_wedged;
	}

	err = 0;
err_client_b:
	preempt_client_fini(&b);
err_client_a:
	preempt_client_fini(&a);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&b.spin);
	igt_spinner_end(&a.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_b;
}
static int __i915_sw_fence_call
dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

static struct i915_request *dummy_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
	if (!rq)
		return NULL;

	INIT_LIST_HEAD(&rq->active_list);
	rq->engine = engine;

	i915_sched_node_init(&rq->sched);

	/* mark this request as permanently incomplete */
	rq->fence.seqno = 1;
	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
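	/*
	 * The completion check compares the 32b seqno in the HWSP against
	 * rq->fence.seqno. By pointing hwsp_seqno at the upper half of the
	 * u64 seqno (always zero, while the lower half is 1), the
	 * comparison can never succeed and this dummy never reads as
	 * completed, no matter what the hardware does.
	 */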
	GEM_BUG_ON(i915_request_completed(rq));

	i915_sw_fence_init(&rq->submit, dummy_notify);
	i915_sw_fence_commit(&rq->submit);

	return rq;
}

static void dummy_request_free(struct i915_request *dummy)
{
	i915_request_mark_complete(dummy);
	i915_sched_node_fini(&dummy->sched);
	i915_sw_fence_fini(&dummy->submit);

	dma_fence_free(&dummy->fence);
}
static int live_suppress_wait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct preempt_client client[4];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	int i;

	/*
	 * Waiters are given a little priority nudge, but not enough
	 * to actually cause any preemption. Double check that we do
	 * not needlessly generate preempt-to-idle cycles.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
		goto err_unlock;
	if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
		goto err_client_0;
	if (preempt_client_init(i915, &client[2])) /* head of queue */
		goto err_client_1;
	if (preempt_client_init(i915, &client[3])) /* bystander */
		goto err_client_2;

	for_each_engine(engine, i915, id) {
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (!engine->emit_init_breadcrumb)
			continue;

		for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
			struct i915_request *rq[ARRAY_SIZE(client)];
			struct i915_request *dummy;

			engine->execlists.preempt_hang.count = 0;

			dummy = dummy_request(engine);
			if (!dummy)
				goto err_client_3;

			for (i = 0; i < ARRAY_SIZE(client); i++) {
				rq[i] = igt_spinner_create_request(&client[i].spin,
								   client[i].ctx, engine,
								   MI_NOOP);
				if (IS_ERR(rq[i])) {
					err = PTR_ERR(rq[i]);
					goto err_wedged;
				}

				/* Disable NEWCLIENT promotion */
				__i915_active_request_set(&rq[i]->timeline->last_request,
							  dummy);
				i915_request_add(rq[i]);
			}
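			/*
			 * Each timeline's last_request now points at the
			 * permanently incomplete dummy, so every context
			 * appears busy and none of these waiters should
			 * qualify for the NEWCLIENT priority bump.
			 */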
			dummy_request_free(dummy);

			GEM_BUG_ON(i915_request_completed(rq[0]));
			if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
				pr_err("%s: First client failed to start\n",
				       engine->name);
				goto err_wedged;
			}
			GEM_BUG_ON(!i915_request_started(rq[0]));

			if (i915_request_wait(rq[depth],
					      I915_WAIT_LOCKED |
					      I915_WAIT_PRIORITY,
					      1) != -ETIME) {
				pr_err("%s: Waiter depth:%d completed!\n",
				       engine->name, depth);
				goto err_wedged;
			}

			for (i = 0; i < ARRAY_SIZE(client); i++)
				igt_spinner_end(&client[i].spin);

			if (igt_flush_test(i915, I915_WAIT_LOCKED))
				goto err_wedged;

			if (engine->execlists.preempt_hang.count) {
				pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
				       engine->name,
				       engine->execlists.preempt_hang.count,
				       depth);
				err = -EINVAL;
				goto err_client_3;
			}
		}
	}

	err = 0;
err_client_3:
	preempt_client_fini(&client[3]);
err_client_2:
	preempt_client_fini(&client[2]);
err_client_1:
	preempt_client_fini(&client[1]);
err_client_0:
	preempt_client_fini(&client[0]);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	for (i = 0; i < ARRAY_SIZE(client); i++)
		igt_spinner_end(&client[i].spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_3;
}
static int live_chain_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct preempt_client hi, lo;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Build a chain AB...BA between two contexts (A, B) and request
	 * preemption of the last request. It should then complete before
	 * the previously submitted spinner in B.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &hi))
		goto err_unlock;

	if (preempt_client_init(i915, &lo))
		goto err_client_hi;

	for_each_engine(engine, i915, id) {
		struct i915_sched_attr attr = {
			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
		};
		struct igt_live_test t;
		struct i915_request *rq;
		int ring_size, count, i;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&lo.spin,
						lo.ctx, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq))
			goto err_wedged;
		i915_request_add(rq);

		ring_size = rq->wa_tail - rq->head;
		if (ring_size < 0)
			ring_size += rq->ring->size;
		ring_size = rq->ring->size / ring_size;
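		/*
		 * One spinner request has just been emitted, so wa_tail -
		 * head is the ring space a single request consumes (plus
		 * the ring size if the tail happened to wrap); dividing
		 * that into the total ring size gives the maximum number
		 * of requests we can expect to keep in flight at once.
		 */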
		pr_debug("%s(%s): Using maximum of %d requests\n",
			 __func__, engine->name, ring_size);

		igt_spinner_end(&lo.spin);
		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
			pr_err("Timed out waiting to flush %s\n", engine->name);
			goto err_wedged;
		}

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_wedged;
		}

		for_each_prime_number_from(count, 1, ring_size) {
			rq = igt_spinner_create_request(&hi.spin,
							hi.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (!igt_wait_for_spinner(&hi.spin, rq))
				goto err_wedged;

			rq = igt_spinner_create_request(&lo.spin,
							lo.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);

			for (i = 0; i < count; i++) {
				rq = i915_request_alloc(engine, lo.ctx);
				if (IS_ERR(rq))
					goto err_wedged;
				i915_request_add(rq);
			}

			rq = i915_request_alloc(engine, hi.ctx);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			engine->schedule(rq, &attr);

			igt_spinner_end(&hi.spin);
			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to preempt over chain of %d\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
			igt_spinner_end(&lo.spin);

			rq = i915_request_alloc(engine, lo.ctx);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to flush low priority chain of %d requests\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
		}

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_wedged;
		}
	}

	err = 0;
err_client_lo:
	preempt_client_fini(&lo);
err_client_hi:
	preempt_client_fini(&hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&hi.spin);
	igt_spinner_end(&lo.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_lo;
}
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);
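		/*
		 * With inject_hang set, submitting the high priority
		 * request should trigger a preempt-to-idle that the
		 * execlists code deliberately leaves hanging: it signals
		 * the completion instead of finishing the preemption, and
		 * we then have to recover the engine with a reset below.
		 */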
		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
/* Returns a pseudo-random integer in the half-open range [min, max) */
static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}

struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}
static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = i915_request_alloc(smoke->engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}
static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		/* Pass each thread its own slot, not the whole array */
		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
	return err;
}
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
	return 0;
}
static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
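	/*
	 * The batch is a page full of MI_ARB_CHECK arbitration points
	 * terminated by MI_BATCH_BUFFER_END, so a running batch offers
	 * the command streamer an opportunity to preempt at every
	 * instruction boundary.
	 */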
	i915_gem_object_flush_map(smoke.batch);
	i915_gem_object_unpin_map(smoke.batch);

	if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
		err = -EIO;
		goto err_batch;
	}

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n])
			goto err_ctx;
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_live_test_end(&t))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915, wakeref);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_busywait_preempt),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_suppress_self_preempt),
		SUBTEST(live_suppress_wait_preempt),
		SUBTEST(live_chain_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}