/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_reset.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_gem_utils.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_context.h"
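
/*
 * Sanity check: submit a single spinning request to each engine, verify
 * it starts executing, then end it and flush. If this fails, none of the
 * preemption tests below that rely on spinners can be trusted.
 */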
static int live_sanitycheck(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_CONTEXTS(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (igt_spinner_init(&spin, i915))
                goto err_unlock;

        ctx = kernel_context(i915);
        if (!ctx)
                goto err_spin;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin, rq)) {
                        GEM_TRACE("spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx;
                }

                igt_spinner_end(&spin);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx;
                }
        }

        err = 0;
err_ctx:
        kernel_context_close(ctx);
err_spin:
        igt_spinner_fini(&spin);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
static int live_busywait_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct intel_engine_cs *engine;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;
        u32 *map;

        /*
         * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
         * preempt the busywaits used to synchronise between rings.
         */

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_unlock;
        ctx_hi->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto err_ctx_lo;
        }

        map = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(map)) {
                err = PTR_ERR(map);
                goto err_obj;
        }

        vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_map;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_map;

        for_each_engine(engine, i915, id) {
                struct i915_request *lo, *hi;
                struct igt_live_test t;
                u32 *cs;

                if (!intel_engine_can_store_dword(engine))
                        continue;

                if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
                        err = -EIO;
                        goto err_vma;
                }

                /*
                 * We create two requests. The low priority request
                 * busywaits on a semaphore (inside the ringbuffer where
                 * it should be preemptible) and the high priority request
                 * uses a MI_STORE_DWORD_IMM to update the semaphore value
                 * allowing the first request to complete. If preemption
                 * fails, we hang instead.
                 */

                lo = igt_request_alloc(ctx_lo, engine);
                if (IS_ERR(lo)) {
                        err = PTR_ERR(lo);
                        goto err_vma;
                }

                cs = intel_ring_begin(lo, 8);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        i915_request_add(lo);
                        goto err_vma;
                }

                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = i915_ggtt_offset(vma);
                *cs++ = 0;
                *cs++ = 1;

                /* XXX Do we need a flush + invalidate here? */

                *cs++ = MI_SEMAPHORE_WAIT |
                        MI_SEMAPHORE_GLOBAL_GTT |
                        MI_SEMAPHORE_POLL |
                        MI_SEMAPHORE_SAD_EQ_SDD;
                *cs++ = 0;
                *cs++ = i915_ggtt_offset(vma);
                *cs++ = 0;

                intel_ring_advance(lo, cs);
                i915_request_add(lo);

                if (wait_for(READ_ONCE(*map), 10)) {
                        err = -ETIMEDOUT;
                        goto err_vma;
                }

                /* Low priority request should be busywaiting now */
                if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
                        pr_err("%s: Busywaiting request did not busywait!\n",
                               engine->name);
                        err = -EIO;
                        goto err_vma;
                }

                hi = igt_request_alloc(ctx_hi, engine);
                if (IS_ERR(hi)) {
                        err = PTR_ERR(hi);
                        goto err_vma;
                }

                cs = intel_ring_begin(hi, 4);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        i915_request_add(hi);
                        goto err_vma;
                }

                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = i915_ggtt_offset(vma);
                *cs++ = 0;
                *cs++ = 0;

                intel_ring_advance(hi, cs);
                i915_request_add(hi);

                if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
                        struct drm_printer p = drm_info_printer(i915->drm.dev);

                        pr_err("%s: Failed to preempt semaphore busywait!\n",
                               engine->name);
                        intel_engine_dump(engine, &p, "%s\n", engine->name);
                        GEM_TRACE_DUMP();

                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_vma;
                }
                GEM_BUG_ON(READ_ONCE(*map));

                if (igt_live_test_end(&t)) {
                        err = -EIO;
                        goto err_vma;
                }
        }

        err = 0;
err_vma:
        i915_vma_unpin(vma);
err_map:
        i915_gem_object_unpin_map(obj);
err_obj:
        i915_gem_object_put(obj);
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_unlock:
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
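
/*
 * Classic preemption: on each engine, a maximum-priority context submits
 * a spinner on top of a minimum-priority spinner, and we check that the
 * high priority spinner starts running while the low priority one is
 * still spinning, i.e. that it preempted rather than queued behind.
 */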
static int live_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
                pr_err("Logical preemption supported, but not exposed\n");

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (igt_spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

        for_each_engine(engine, i915, id) {
                struct igt_live_test t;
                struct i915_request *rq;

                if (!intel_engine_has_preemption(engine))
                        continue;

                if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin_lo, rq)) {
                        GEM_TRACE("lo spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        igt_spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin_hi, rq)) {
                        GEM_TRACE("hi spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                igt_spinner_end(&spin_hi);
                igt_spinner_end(&spin_lo);

                if (igt_live_test_end(&t)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        igt_spinner_fini(&spin_lo);
err_spin_hi:
        igt_spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
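
/*
 * Late preemption: both contexts start at default priority, so the second
 * spinner queues behind the first. Only after bumping its priority via
 * engine->schedule() do we expect the second spinner to preempt the
 * first, exercising priority changes applied to already-queued requests.
 */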
static int live_late_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        struct i915_sched_attr attr = {};
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (igt_spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;

        for_each_engine(engine, i915, id) {
                struct igt_live_test t;
                struct i915_request *rq;

                if (!intel_engine_has_preemption(engine))
                        continue;

                if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin_lo, rq)) {
                        pr_err("First context failed to start\n");
                        goto err_wedged;
                }

                rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
                                                MI_NOOP);
                if (IS_ERR(rq)) {
                        igt_spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (igt_wait_for_spinner(&spin_hi, rq)) {
                        pr_err("Second context overtook first?\n");
                        goto err_wedged;
                }

                attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
                engine->schedule(rq, &attr);

                if (!igt_wait_for_spinner(&spin_hi, rq)) {
                        pr_err("High priority context failed to preempt the low priority context\n");
                        GEM_TRACE_DUMP();
                        goto err_wedged;
                }

                igt_spinner_end(&spin_hi);
                igt_spinner_end(&spin_lo);

                if (igt_live_test_end(&t)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        igt_spinner_fini(&spin_lo);
err_spin_hi:
        igt_spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        igt_spinner_end(&spin_hi);
        igt_spinner_end(&spin_lo);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_ctx_lo;
}
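
/*
 * A preempt_client bundles a context with its own spinner so the
 * multi-client tests below can submit independent streams of
 * busy-spinning work and end them individually.
 */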
struct preempt_client {
        struct igt_spinner spin;
        struct i915_gem_context *ctx;
};

static int preempt_client_init(struct drm_i915_private *i915,
                               struct preempt_client *c)
{
        c->ctx = kernel_context(i915);
        if (!c->ctx)
                return -ENOMEM;

        if (igt_spinner_init(&c->spin, i915))
                goto err_ctx;

        return 0;

err_ctx:
        kernel_context_close(c->ctx);
        return -ENOMEM;
}

static void preempt_client_fini(struct preempt_client *c)
{
        igt_spinner_fini(&c->spin);
        kernel_context_close(c->ctx);
}
static int live_suppress_self_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_sched_attr attr = {
                .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
        };
        struct preempt_client a, b;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        /*
         * Verify that if a preemption request does not cause a change in
         * the current execution order, the preempt-to-idle injection is
         * skipped and that we do not accidentally apply it after the CS
         * completion event.
         */

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        if (USES_GUC_SUBMISSION(i915))
                return 0; /* presume the GuC is a black box */

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (preempt_client_init(i915, &a))
                goto err_unlock;
        if (preempt_client_init(i915, &b))
                goto err_client_a;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq_a, *rq_b;
                int depth;

                if (!intel_engine_has_preemption(engine))
                        continue;

                engine->execlists.preempt_hang.count = 0;

                rq_a = igt_spinner_create_request(&a.spin,
                                                  a.ctx, engine,
                                                  MI_NOOP);
                if (IS_ERR(rq_a)) {
                        err = PTR_ERR(rq_a);
                        goto err_client_b;
                }

                i915_request_add(rq_a);
                if (!igt_wait_for_spinner(&a.spin, rq_a)) {
                        pr_err("First client failed to start\n");
                        goto err_wedged;
                }

                for (depth = 0; depth < 8; depth++) {
                        rq_b = igt_spinner_create_request(&b.spin,
                                                          b.ctx, engine,
                                                          MI_NOOP);
                        if (IS_ERR(rq_b)) {
                                err = PTR_ERR(rq_b);
                                goto err_client_b;
                        }
                        i915_request_add(rq_b);

                        GEM_BUG_ON(i915_request_completed(rq_a));
                        engine->schedule(rq_a, &attr);
                        igt_spinner_end(&a.spin);

                        if (!igt_wait_for_spinner(&b.spin, rq_b)) {
                                pr_err("Second client failed to start\n");
                                goto err_wedged;
                        }

                        swap(a, b);
                        rq_a = rq_b;
                }
                igt_spinner_end(&a.spin);

                if (engine->execlists.preempt_hang.count) {
                        pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
                               engine->execlists.preempt_hang.count,
                               depth);
                        err = -EINVAL;
                        goto err_client_b;
                }

                if (igt_flush_test(i915, I915_WAIT_LOCKED))
                        goto err_wedged;
        }

        err = 0;
err_client_b:
        preempt_client_fini(&b);
err_client_a:
        preempt_client_fini(&a);
err_unlock:
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        igt_spinner_end(&b.spin);
        igt_spinner_end(&a.spin);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_client_b;
}
static int __i915_sw_fence_call
dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        return NOTIFY_DONE;
}
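
/*
 * Build a bare, permanently incomplete request by hand. It is never
 * submitted to hardware; the waiter test below merely marks it as the
 * predecessor of real requests to suppress their NEWCLIENT priority
 * boost, and dummy_request_free() later fakes its completion.
 */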
static struct i915_request *dummy_request(struct intel_engine_cs *engine)
{
        struct i915_request *rq;

        rq = kzalloc(sizeof(*rq), GFP_KERNEL);
        if (!rq)
                return NULL;

        INIT_LIST_HEAD(&rq->active_list);
        rq->engine = engine;

        i915_sched_node_init(&rq->sched);

        /* mark this request as permanently incomplete */
        rq->fence.seqno = 1;
        BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
        rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
        GEM_BUG_ON(i915_request_completed(rq));

        i915_sw_fence_init(&rq->submit, dummy_notify);
        set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

        return rq;
}

static void dummy_request_free(struct i915_request *dummy)
{
        /* We have to fake the CS interrupt to kick the next request */
        i915_sw_fence_commit(&dummy->submit);

        i915_request_mark_complete(dummy);
        dma_fence_signal(&dummy->fence);

        i915_sched_node_fini(&dummy->sched);
        i915_sw_fence_fini(&dummy->submit);

        dma_fence_free(&dummy->fence);
}
static int live_suppress_wait_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct preempt_client client[4];
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;
        int i;

        /*
         * Waiters are given a little priority nudge, but not enough
         * to actually cause any preemption. Double check that we do
         * not needlessly generate preempt-to-idle cycles.
         */

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
                goto err_unlock;

        if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
                goto err_client_0;

        if (preempt_client_init(i915, &client[2])) /* head of queue */
                goto err_client_1;

        if (preempt_client_init(i915, &client[3])) /* bystander */
                goto err_client_2;

        for_each_engine(engine, i915, id) {
                int depth;

                if (!intel_engine_has_preemption(engine))
                        continue;

                if (!engine->emit_init_breadcrumb)
                        continue;

                for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
                        struct i915_request *rq[ARRAY_SIZE(client)];
                        struct i915_request *dummy;

                        engine->execlists.preempt_hang.count = 0;

                        dummy = dummy_request(engine);
                        if (!dummy)
                                goto err_client_3;

                        for (i = 0; i < ARRAY_SIZE(client); i++) {
                                rq[i] = igt_spinner_create_request(&client[i].spin,
                                                                   client[i].ctx, engine,
                                                                   MI_NOOP);
                                if (IS_ERR(rq[i])) {
                                        err = PTR_ERR(rq[i]);
                                        goto err_wedged;
                                }

                                /* Disable NEWCLIENT promotion */
                                __i915_active_request_set(&rq[i]->timeline->last_request,
                                                          dummy);
                                i915_request_add(rq[i]);
                        }

                        dummy_request_free(dummy);

                        GEM_BUG_ON(i915_request_completed(rq[0]));
                        if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
                                pr_err("%s: First client failed to start\n",
                                       engine->name);
                                goto err_wedged;
                        }
                        GEM_BUG_ON(!i915_request_started(rq[0]));

                        if (i915_request_wait(rq[depth],
                                              I915_WAIT_LOCKED |
                                              I915_WAIT_PRIORITY,
                                              1) != -ETIME) {
                                pr_err("%s: Waiter depth:%d completed!\n",
                                       engine->name, depth);
                                goto err_wedged;
                        }

                        for (i = 0; i < ARRAY_SIZE(client); i++)
                                igt_spinner_end(&client[i].spin);

                        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                                goto err_wedged;

                        if (engine->execlists.preempt_hang.count) {
                                pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
                                       engine->name,
                                       engine->execlists.preempt_hang.count,
                                       depth);
                                err = -EINVAL;
                                goto err_client_3;
                        }
                }
        }

        err = 0;
err_client_3:
        preempt_client_fini(&client[3]);
err_client_2:
        preempt_client_fini(&client[2]);
err_client_1:
        preempt_client_fini(&client[1]);
err_client_0:
        preempt_client_fini(&client[0]);
err_unlock:
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        for (i = 0; i < ARRAY_SIZE(client); i++)
                igt_spinner_end(&client[i].spin);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_client_3;
}
static int live_chain_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct preempt_client hi, lo;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        /*
         * Build a chain AB...BA between two contexts (A, B) and request
         * preemption of the last request. It should then complete before
         * the previously submitted spinner in B.
         */

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (preempt_client_init(i915, &hi))
                goto err_unlock;

        if (preempt_client_init(i915, &lo))
                goto err_client_hi;

        for_each_engine(engine, i915, id) {
                struct i915_sched_attr attr = {
                        .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
                };
                struct igt_live_test t;
                struct i915_request *rq;
                int ring_size, count, i;

                if (!intel_engine_has_preemption(engine))
                        continue;

                rq = igt_spinner_create_request(&lo.spin,
                                                lo.ctx, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq))
                        goto err_wedged;
                i915_request_add(rq);
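
                /*
                 * Estimate how many requests fit in the ring: a spinner
                 * request has just been emitted, so dividing the ring size
                 * by the space that one request consumed bounds the chain
                 * length we can build without blocking on ring space.
                 */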
                ring_size = rq->wa_tail - rq->head;
                if (ring_size < 0)
                        ring_size += rq->ring->size;
                ring_size = rq->ring->size / ring_size;
                pr_debug("%s(%s): Using maximum of %d requests\n",
                         __func__, engine->name, ring_size);

                igt_spinner_end(&lo.spin);
                if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
                        pr_err("Timed out waiting to flush %s\n", engine->name);
                        goto err_wedged;
                }

                if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
                        err = -EIO;
                        goto err_wedged;
                }

                for_each_prime_number_from(count, 1, ring_size) {
                        rq = igt_spinner_create_request(&hi.spin,
                                                        hi.ctx, engine,
                                                        MI_ARB_CHECK);
                        if (IS_ERR(rq))
                                goto err_wedged;
                        i915_request_add(rq);
                        if (!igt_wait_for_spinner(&hi.spin, rq))
                                goto err_wedged;

                        rq = igt_spinner_create_request(&lo.spin,
                                                        lo.ctx, engine,
                                                        MI_ARB_CHECK);
                        if (IS_ERR(rq))
                                goto err_wedged;
                        i915_request_add(rq);

                        for (i = 0; i < count; i++) {
                                rq = igt_request_alloc(lo.ctx, engine);
                                if (IS_ERR(rq))
                                        goto err_wedged;
                                i915_request_add(rq);
                        }

                        rq = igt_request_alloc(hi.ctx, engine);
                        if (IS_ERR(rq))
                                goto err_wedged;
                        i915_request_add(rq);
                        engine->schedule(rq, &attr);

                        igt_spinner_end(&hi.spin);
                        if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
                                struct drm_printer p =
                                        drm_info_printer(i915->drm.dev);

                                pr_err("Failed to preempt over chain of %d\n",
                                       count);
                                intel_engine_dump(engine, &p,
                                                  "%s\n", engine->name);
                                goto err_wedged;
                        }
                        igt_spinner_end(&lo.spin);

                        rq = igt_request_alloc(lo.ctx, engine);
                        if (IS_ERR(rq))
                                goto err_wedged;
                        i915_request_add(rq);
                        if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
                                struct drm_printer p =
                                        drm_info_printer(i915->drm.dev);

                                pr_err("Failed to flush low priority chain of %d requests\n",
                                       count);
                                intel_engine_dump(engine, &p,
                                                  "%s\n", engine->name);
                                goto err_wedged;
                        }
                }

                if (igt_live_test_end(&t)) {
                        err = -EIO;
                        goto err_wedged;
                }
        }

        err = 0;
err_client_lo:
        preempt_client_fini(&lo);
err_client_hi:
        preempt_client_fini(&hi);
err_unlock:
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        igt_spinner_end(&hi.spin);
        igt_spinner_end(&lo.spin);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_client_lo;
}
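
/*
 * Inject a hang in place of the preempt-to-idle context switch and check
 * that an engine reset while preemption is pending recovers cleanly:
 * afterwards the high priority spinner must still start and run ahead of
 * the low priority one.
 */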
static int live_preempt_hang(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        if (!intel_has_reset_engine(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (igt_spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                if (!intel_engine_has_preemption(engine))
                        continue;

                rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin_lo, rq)) {
                        GEM_TRACE("lo spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        igt_spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                init_completion(&engine->execlists.preempt_hang.completion);
                engine->execlists.preempt_hang.inject_hang = true;

                i915_request_add(rq);

                if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
                                                 HZ / 10)) {
                        pr_err("Preemption did not occur within timeout!\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
                i915_reset_engine(engine, NULL);
                clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

                engine->execlists.preempt_hang.inject_hang = false;

                if (!igt_wait_for_spinner(&spin_hi, rq)) {
                        GEM_TRACE("hi spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                igt_spinner_end(&spin_hi);
                igt_spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        igt_spinner_fini(&spin_lo);
err_spin_hi:
        igt_spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
static int random_range(struct rnd_state *rnd, int min, int max)
{
        return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
        return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}
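
/*
 * Bookkeeping for the preemption smoketests: a pool of contexts to pick
 * from at random, the engine currently being bombarded, an optional
 * batch to run, and a count of how many requests were submitted before
 * the timeout.
 */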
struct preempt_smoke {
        struct drm_i915_private *i915;
        struct i915_gem_context **contexts;
        struct intel_engine_cs *engine;
        struct drm_i915_gem_object *batch;
        unsigned int ncontext;
        struct rnd_state prng;
        unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
        return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
                                                          &smoke->prng)];
}
static int smoke_submit(struct preempt_smoke *smoke,
                        struct i915_gem_context *ctx, int prio,
                        struct drm_i915_gem_object *batch)
{
        struct i915_request *rq;
        struct i915_vma *vma = NULL;
        int err = 0;

        if (batch) {
                vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);

                err = i915_vma_pin(vma, 0, 0, PIN_USER);
                if (err)
                        return err;
        }

        ctx->sched.priority = prio;

        rq = igt_request_alloc(ctx, smoke->engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto unpin;
        }

        if (vma) {
                err = rq->engine->emit_bb_start(rq,
                                                vma->node.start,
                                                PAGE_SIZE, 0);
                if (!err)
                        err = i915_vma_move_to_active(vma, rq, 0);
        }

        i915_request_add(rq);

unpin:
        if (vma)
                i915_vma_unpin(vma);

        return err;
}
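
/*
 * The crescendo phase drives each engine from its own kthread,
 * resubmitting with cyclically increasing priority (count modulo
 * I915_PRIORITY_MAX) so that nearly every submission should trigger a
 * preemption of its predecessor.
 */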
static int smoke_crescendo_thread(void *arg)
{
        struct preempt_smoke *smoke = arg;
        IGT_TIMEOUT(end_time);
        unsigned long count;

        count = 0;
        do {
                struct i915_gem_context *ctx = smoke_context(smoke);
                int err;

                mutex_lock(&smoke->i915->drm.struct_mutex);
                err = smoke_submit(smoke,
                                   ctx, count % I915_PRIORITY_MAX,
                                   smoke->batch);
                mutex_unlock(&smoke->i915->drm.struct_mutex);
                if (err)
                        return err;

                count++;
        } while (!__igt_timeout(end_time, NULL));

        smoke->count = count;
        return 0;
}
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
        struct task_struct *tsk[I915_NUM_ENGINES] = {};
        struct preempt_smoke arg[I915_NUM_ENGINES];
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned long count;
        int err = 0;

        mutex_unlock(&smoke->i915->drm.struct_mutex);

        for_each_engine(engine, smoke->i915, id) {
                arg[id] = *smoke;
                arg[id].engine = engine;
                if (!(flags & BATCH))
                        arg[id].batch = NULL;
                arg[id].count = 0;

                /* each thread must receive its own per-engine state */
                tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
                                      "igt/smoke:%d", id);
                if (IS_ERR(tsk[id])) {
                        err = PTR_ERR(tsk[id]);
                        break;
                }
                get_task_struct(tsk[id]);
        }

        count = 0;
        for_each_engine(engine, smoke->i915, id) {
                int status;

                if (IS_ERR_OR_NULL(tsk[id]))
                        continue;

                status = kthread_stop(tsk[id]);
                if (status && !err)
                        err = status;

                count += arg[id].count;

                put_task_struct(tsk[id]);
        }

        mutex_lock(&smoke->i915->drm.struct_mutex);

        pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
                count, flags,
                RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);

        return err;
}
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
        enum intel_engine_id id;
        IGT_TIMEOUT(end_time);
        unsigned long count;

        count = 0;
        do {
                for_each_engine(smoke->engine, smoke->i915, id) {
                        struct i915_gem_context *ctx = smoke_context(smoke);
                        int err;

                        err = smoke_submit(smoke,
                                           ctx, random_priority(&smoke->prng),
                                           flags & BATCH ? smoke->batch : NULL);
                        if (err)
                                return err;

                        count++;
                }
        } while (!__igt_timeout(end_time, NULL));

        pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
                count, flags,
                RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
        return 0;
}
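
/*
 * Smoketest entry point: build a batch full of MI_ARB_CHECKs (so the
 * hardware has frequent opportunities to arbitrate), create a pool of
 * contexts, and run both the crescendo and random phases with and
 * without that batch, letting the live test catch any fallout.
 */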
static int live_preempt_smoke(void *arg)
{
        struct preempt_smoke smoke = {
                .i915 = arg,
                .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
                .ncontext = 1024,
        };
        const unsigned int phase[] = { 0, BATCH };
        intel_wakeref_t wakeref;
        struct igt_live_test t;
        int err = -ENOMEM;
        u32 *cs;
        int n;

        if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
                return 0;

        smoke.contexts = kmalloc_array(smoke.ncontext,
                                       sizeof(*smoke.contexts),
                                       GFP_KERNEL);
        if (!smoke.contexts)
                return -ENOMEM;

        mutex_lock(&smoke.i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(smoke.i915);

        smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
        if (IS_ERR(smoke.batch)) {
                err = PTR_ERR(smoke.batch);
                goto err_unlock;
        }

        cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
        }
        for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
                cs[n] = MI_ARB_CHECK;
        cs[n] = MI_BATCH_BUFFER_END;
        i915_gem_object_flush_map(smoke.batch);
        i915_gem_object_unpin_map(smoke.batch);

        if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
                err = -EIO;
                goto err_batch;
        }

        for (n = 0; n < smoke.ncontext; n++) {
                smoke.contexts[n] = kernel_context(smoke.i915);
                if (!smoke.contexts[n])
                        goto err_ctx;
        }

        for (n = 0; n < ARRAY_SIZE(phase); n++) {
                err = smoke_crescendo(&smoke, phase[n]);
                if (err)
                        goto err_ctx;

                err = smoke_random(&smoke, phase[n]);
                if (err)
                        goto err_ctx;
        }

err_ctx:
        if (igt_live_test_end(&t))
                err = -EIO;

        for (n = 0; n < smoke.ncontext; n++) {
                if (!smoke.contexts[n])
                        break;
                kernel_context_close(smoke.contexts[n]);
        }

err_batch:
        i915_gem_object_put(smoke.batch);
err_unlock:
        intel_runtime_pm_put(smoke.i915, wakeref);
        mutex_unlock(&smoke.i915->drm.struct_mutex);
        kfree(smoke.contexts);

        return err;
}
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_sanitycheck),
                SUBTEST(live_busywait_preempt),
                SUBTEST(live_preempt),
                SUBTEST(live_late_preempt),
                SUBTEST(live_suppress_self_preempt),
                SUBTEST(live_suppress_wait_preempt),
                SUBTEST(live_chain_preempt),
                SUBTEST(live_preempt_hang),
                SUBTEST(live_preempt_smoke),
        };

        if (!HAS_EXECLISTS(i915))
                return 0;

        if (i915_terminally_wedged(i915))
                return 0;

        return i915_subtests(tests, i915);
}