/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "../i915_reset.h"

#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_live_test.h"
#include "igt_spinner.h"
#include "i915_random.h"

#include "mock_context.h"

static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
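
/*
 * Editor's note: every live test below follows the scaffold used by
 * live_sanitycheck() above, built on the igt_spinner helpers. A minimal
 * sketch of that pattern, using only functions already used in this file:
 *
 *	struct igt_spinner spin;
 *	struct i915_request *rq;
 *
 *	igt_spinner_init(&spin, i915);
 *	rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
 *	i915_request_add(rq);
 *	if (igt_wait_for_spinner(&spin, rq))
 *		igt_spinner_end(&spin);
 *	igt_spinner_fini(&spin);
 *
 * The spinner's batch loops on itself indefinitely, keeping its request
 * busy on the GPU until igt_spinner_end() rewrites the batch to
 * terminate; that is what lets these tests hold an engine busy at a
 * chosen priority.
 */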

static int live_busywait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	u32 *map;

	/*
	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
	 * preempt the busywaits used to synchronise between rings.
	 */
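
	/*
	 * Editor's illustration (not part of the original test):
	 * MI_SEMAPHORE_WAIT in polling mode with SAD_EQ_SDD keeps the
	 * command streamer spinning until the dword at the supplied
	 * address equals the inline data, roughly the CS equivalent of
	 *
	 *	while (READ_ONCE(*semaphore) != value)
	 *		cpu_relax();
	 *
	 * Nothing on the CPU completes it; only another request writing
	 * the semaphore (or preemption) can unblock the busywait.
	 */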

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_unlock;
	ctx_hi->sched.priority = INT_MAX;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = INT_MIN;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_ctx_lo;
	}

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_map;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_map;

	for_each_engine(engine, i915, id) {
		struct i915_request *lo, *hi;
		struct igt_live_test t;
		u32 *cs;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_vma;
		}

		/*
		 * We create two requests. The low priority request
		 * busywaits on a semaphore (inside the ringbuffer where
		 * it should be preemptible) and the high priority request
		 * uses a MI_STORE_DWORD_IMM to update the semaphore value
		 * allowing the first request to complete. If preemption
		 * fails, we hang instead.
		 */
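
		/*
		 * Editor's sketch of the two command streams constructed
		 * below:
		 *
		 *	lo:	STORE_DWORD	*map <- 1  (now busywaiting)
		 *		SEMAPHORE_WAIT	until *map == 0
		 *
		 *	hi:	STORE_DWORD	*map <- 0  (release lo)
		 *
		 * lo can only complete after hi has executed, and hi can
		 * only execute if lo's busywait is preempted.
		 */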

		lo = i915_request_alloc(engine, ctx_lo);
		if (IS_ERR(lo)) {
			err = PTR_ERR(lo);
			goto err_vma;
		}

		cs = intel_ring_begin(lo, 8);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(lo);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 1;

		/* XXX Do we need a flush + invalidate here? */

		*cs++ = MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_POLL |
			MI_SEMAPHORE_SAD_EQ_SDD;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;

		intel_ring_advance(lo, cs);
		i915_request_add(lo);

		if (wait_for(READ_ONCE(*map), 10)) {
			err = -ETIMEDOUT;
			goto err_vma;
		}

		/* Low priority request should be busywaiting now */
		if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
			pr_err("%s: Busywaiting request did not busywait!\n",
			       engine->name);
			err = -EIO;
			goto err_vma;
		}

		hi = i915_request_alloc(engine, ctx_hi);
		if (IS_ERR(hi)) {
			err = PTR_ERR(hi);
			goto err_vma;
		}

		cs = intel_ring_begin(hi, 4);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(hi);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 0;

		intel_ring_advance(hi, cs);
		i915_request_add(hi);

		if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
			struct drm_printer p = drm_info_printer(i915->drm.dev);

			pr_err("%s: Failed to preempt semaphore busywait!\n",
			       engine->name);

			intel_engine_dump(engine, &p, "%s\n", engine->name);
			GEM_TRACE_DUMP();

			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_vma;
		}
		GEM_BUG_ON(READ_ONCE(*map));

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_vma;
		}
	}

	err = 0;
err_vma:
	i915_vma_unpin(vma);
err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
		pr_err("Logical preemption supported, but not exposed\n");

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}

struct preempt_client {
	struct igt_spinner spin;
	struct i915_gem_context *ctx;
};

static int preempt_client_init(struct drm_i915_private *i915,
			       struct preempt_client *c)
{
	c->ctx = kernel_context(i915);
	if (!c->ctx)
		return -ENOMEM;

	if (igt_spinner_init(&c->spin, i915))
		goto err_ctx;

	return 0;

err_ctx:
	kernel_context_close(c->ctx);
	return -ENOMEM;
}

static void preempt_client_fini(struct preempt_client *c)
{
	igt_spinner_fini(&c->spin);
	kernel_context_close(c->ctx);
}

static int live_suppress_self_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
	};
	struct preempt_client a, b;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Verify that if a preemption request does not cause a change in
	 * the current execution order, the preempt-to-idle injection is
	 * skipped and that we do not accidentally apply it after the CS
	 * completion event.
	 */
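
	/*
	 * Editor's note (assumption about the selftest plumbing): with
	 * CONFIG_DRM_I915_SELFTEST, the execlists backend appears to
	 * record each injected preempt-to-idle cycle in
	 * engine->execlists.preempt_hang.count, which is why the loop
	 * below zeroes it per engine and then fails on any nonzero value:
	 *
	 *	engine->execlists.preempt_hang.count = 0;
	 *	(submit equal-priority work and reschedule it)
	 *	if (engine->execlists.preempt_hang.count)
	 *		return -EINVAL; (preemption was not suppressed)
	 */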

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (USES_GUC_SUBMISSION(i915))
		return 0; /* presume black box */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &a))
		goto err_unlock;
	if (preempt_client_init(i915, &b))
		goto err_client_a;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq_a, *rq_b;
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		engine->execlists.preempt_hang.count = 0;

		rq_a = igt_spinner_create_request(&a.spin,
						  a.ctx, engine,
						  MI_NOOP);
		if (IS_ERR(rq_a)) {
			err = PTR_ERR(rq_a);
			goto err_client_b;
		}

		i915_request_add(rq_a);
		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
			pr_err("First client failed to start\n");
			goto err_wedged;
		}

		for (depth = 0; depth < 8; depth++) {
			rq_b = igt_spinner_create_request(&b.spin,
							  b.ctx, engine,
							  MI_NOOP);
			if (IS_ERR(rq_b)) {
				err = PTR_ERR(rq_b);
				goto err_client_b;
			}
			i915_request_add(rq_b);

			GEM_BUG_ON(i915_request_completed(rq_a));
			engine->schedule(rq_a, &attr);
			igt_spinner_end(&a.spin);

			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
				pr_err("Second client failed to start\n");
				goto err_wedged;
			}

			swap(a, b);
			rq_a = rq_b;
		}
		igt_spinner_end(&a.spin);

		if (engine->execlists.preempt_hang.count) {
			pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
			       engine->execlists.preempt_hang.count,
			       depth);
			err = -EINVAL;
			goto err_client_b;
		}

		if (igt_flush_test(i915, I915_WAIT_LOCKED))
			goto err_wedged;
	}

	err = 0;
err_client_b:
	preempt_client_fini(&b);
err_client_a:
	preempt_client_fini(&a);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&b.spin);
	igt_spinner_end(&a.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_b;
}

static int __i915_sw_fence_call
dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

static struct i915_request *dummy_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
	if (!rq)
		return NULL;

	INIT_LIST_HEAD(&rq->active_list);
	rq->engine = engine;

	i915_sched_node_init(&rq->sched);

	/* mark this request as permanently incomplete */
	rq->fence.seqno = 1;
	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
	GEM_BUG_ON(i915_request_completed(rq));
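
	/*
	 * Editor's note: i915_request_completed() compares the u32
	 * breadcrumb at *rq->hwsp_seqno against the low 32 bits of
	 * rq->fence.seqno. By pointing hwsp_seqno at the upper half of
	 * the 64b seqno (always zero here) while the seqno itself is
	 * nonzero,
	 *
	 *	rq->fence.seqno == 1 but *rq->hwsp_seqno == 0
	 *
	 * the comparison can never succeed, so the request stays
	 * incomplete until dummy_request_free() marks it complete
	 * explicitly.
	 */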

	i915_sw_fence_init(&rq->submit, dummy_notify);
	i915_sw_fence_commit(&rq->submit);

	return rq;
}

static void dummy_request_free(struct i915_request *dummy)
{
	i915_request_mark_complete(dummy);
	i915_sched_node_fini(&dummy->sched);
	i915_sw_fence_fini(&dummy->submit);

	dma_fence_free(&dummy->fence);
}

static int live_suppress_wait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct preempt_client client[4];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	int i;

	/*
	 * Waiters are given a little priority nudge, but not enough
	 * to actually cause any preemption. Double check that we do
	 * not needlessly generate preempt-to-idle cycles.
	 */
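
	/*
	 * Editor's note: the "nudge" referred to above is the small
	 * internal priority bump applied when something waits on a
	 * request; it may reorder the queue but is not meant to trigger
	 * a preempt-to-idle cycle on its own. The I915_WAIT_PRIORITY
	 * flag passed to i915_request_wait() below requests exactly that
	 * bump for the waited-on request.
	 */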

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
		goto err_unlock;
	if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
		goto err_client_0;
	if (preempt_client_init(i915, &client[2])) /* head of queue */
		goto err_client_1;
	if (preempt_client_init(i915, &client[3])) /* bystander */
		goto err_client_2;

	for_each_engine(engine, i915, id) {
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (!engine->emit_init_breadcrumb)
			continue;

		for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
			struct i915_request *rq[ARRAY_SIZE(client)];
			struct i915_request *dummy;

			engine->execlists.preempt_hang.count = 0;

			dummy = dummy_request(engine);
			if (!dummy)
				goto err_client_3;

			for (i = 0; i < ARRAY_SIZE(client); i++) {
				rq[i] = igt_spinner_create_request(&client[i].spin,
								   client[i].ctx, engine,
								   MI_NOOP);
				if (IS_ERR(rq[i])) {
					err = PTR_ERR(rq[i]);
					goto err_wedged;
				}

				/* Disable NEWCLIENT promotion */
				__i915_active_request_set(&rq[i]->timeline->last_request,
							  dummy);
				i915_request_add(rq[i]);
			}

			dummy_request_free(dummy);

			GEM_BUG_ON(i915_request_completed(rq[0]));
			if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
				pr_err("%s: First client failed to start\n",
				       engine->name);
				goto err_wedged;
			}
			GEM_BUG_ON(!i915_request_started(rq[0]));

			if (i915_request_wait(rq[depth],
					      I915_WAIT_LOCKED |
					      I915_WAIT_PRIORITY,
					      1) != -ETIME) {
				pr_err("%s: Waiter depth:%d completed!\n",
				       engine->name, depth);
				goto err_wedged;
			}

			for (i = 0; i < ARRAY_SIZE(client); i++)
				igt_spinner_end(&client[i].spin);

			if (igt_flush_test(i915, I915_WAIT_LOCKED))
				goto err_wedged;

			if (engine->execlists.preempt_hang.count) {
				pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
				       engine->name,
				       engine->execlists.preempt_hang.count,
				       depth);
				err = -EINVAL;
				goto err_client_3;
			}
		}
	}

	err = 0;
err_client_3:
	preempt_client_fini(&client[3]);
err_client_2:
	preempt_client_fini(&client[2]);
err_client_1:
	preempt_client_fini(&client[1]);
err_client_0:
	preempt_client_fini(&client[0]);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	for (i = 0; i < ARRAY_SIZE(client); i++)
		igt_spinner_end(&client[i].spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_3;
}

static int live_chain_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct preempt_client hi, lo;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Build a chain AB...BA between two contexts (A, B) and request
	 * preemption of the last request. It should then complete before
	 * the previously submitted spinner in B.
	 */
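
	/*
	 * Editor's illustration of the submission order built below for a
	 * chain length of count == 2 (A == hi context, B == lo context):
	 *
	 *	A: [spinner] ........................ [request, boosted]
	 *	B: .......... [spinner] [req] [req] ..
	 *
	 * The final A request is boosted to I915_PRIORITY_MAX and must
	 * overtake every queued B request, including B's still-running
	 * spinner, to complete within the timeout.
	 */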

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &hi))
		goto err_unlock;

	if (preempt_client_init(i915, &lo))
		goto err_client_hi;

	for_each_engine(engine, i915, id) {
		struct i915_sched_attr attr = {
			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
		};
		struct igt_live_test t;
		struct i915_request *rq;
		int ring_size, count, i;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&lo.spin,
						lo.ctx, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq))
			goto err_wedged;
		i915_request_add(rq);

		ring_size = rq->wa_tail - rq->head;
		if (ring_size < 0)
			ring_size += rq->ring->size;
		ring_size = rq->ring->size / ring_size;
		pr_debug("%s(%s): Using maximum of %d requests\n",
			 __func__, engine->name, ring_size);
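
		/*
		 * Editor's note: rq->wa_tail - rq->head is the space one
		 * (mostly empty) request occupied in the ring, so
		 * ring_size ends up as roughly how many such requests fit
		 * in the ring at once. For example, assuming 192 bytes
		 * per request in a 16KiB ring:
		 *
		 *	ring_size = 16384 / 192 = 85
		 *
		 * which bounds the prime chain lengths iterated below so
		 * that submission never blocks on ring space.
		 */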

		igt_spinner_end(&lo.spin);
		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
			pr_err("Timed out waiting to flush %s\n", engine->name);
			goto err_wedged;
		}

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_wedged;
		}

		for_each_prime_number_from(count, 1, ring_size) {
			rq = igt_spinner_create_request(&hi.spin,
							hi.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (!igt_wait_for_spinner(&hi.spin, rq))
				goto err_wedged;

			rq = igt_spinner_create_request(&lo.spin,
							lo.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);

			for (i = 0; i < count; i++) {
				rq = i915_request_alloc(engine, lo.ctx);
				if (IS_ERR(rq))
					goto err_wedged;
				i915_request_add(rq);
			}

			rq = i915_request_alloc(engine, hi.ctx);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			engine->schedule(rq, &attr);

			igt_spinner_end(&hi.spin);
			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to preempt over chain of %d\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
			igt_spinner_end(&lo.spin);

			rq = i915_request_alloc(engine, lo.ctx);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to flush low priority chain of %d requests\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
		}

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_wedged;
		}
	}

	err = 0;
err_client_lo:
	preempt_client_fini(&lo);
err_client_hi:
	preempt_client_fini(&hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&hi.spin);
	igt_spinner_end(&lo.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_lo;
}

static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!\n");
			GEM_TRACE("%s\n", engine->name);
			GEM_TRACE_DUMP();

			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}
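
/*
 * Editor's note: i915_prandom_u32_max_state(ep, rnd) draws uniformly
 * from [0, ep), so random_range() returns values in the half-open
 * interval [min, max) and random_priority() can never return
 * I915_PRIORITY_MAX itself, e.g.:
 *
 *	random_range(rnd, 0, 4) -> one of 0, 1, 2, 3
 *
 * which is fine here since only a spread of priorities is required.
 */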

struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}

static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = i915_request_alloc(smoke->engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}

static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}

static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);

	return err;
}

static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);

	return 0;
}

static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_flush_map(smoke.batch);
	i915_gem_object_unpin_map(smoke.batch);
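
	/*
	 * Editor's note: the batch built above is a page of MI_ARB_CHECK
	 * instructions terminated by MI_BATCH_BUFFER_END; on a 4KiB page
	 * of u32s that is
	 *
	 *	cs[0 .. 1022] = MI_ARB_CHECK;
	 *	cs[1023]      = MI_BATCH_BUFFER_END;
	 *
	 * MI_ARB_CHECK is an arbitration point, so such a batch can be
	 * preempted at almost every instruction, maximising the chance
	 * that the random/crescendo priorities below actually exercise
	 * preemption mid-batch.
	 */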

	if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
		err = -EIO;
		goto err_batch;
	}

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n])
			goto err_ctx;
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_live_test_end(&t))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915, wakeref);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_busywait_preempt),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_suppress_self_preempt),
		SUBTEST(live_suppress_wait_preempt),
		SUBTEST(live_chain_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}