/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_wedge_me.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

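/*
 * Live selftests for the workarounds and the RING_NONPRIV whitelist: can
 * userspace write the whitelisted registers, does the whitelist survive
 * engine and device resets, are whitelisted writes isolated between
 * contexts, and are the workaround lists reapplied after a reset?
 */
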
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

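/*
 * Reference copies of the GT, engine and context workaround lists, built
 * from scratch so that the values currently held by the hardware can be
 * verified against known-good expectations across resets.
 */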
#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		char name[REF_NAME_MAX];
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

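/* Build the reference workaround lists for the GT and for each engine. */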
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF");
	gt_init_workarounds(i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, i915, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;
		char *name = lists->engine[id].name;

		snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);

		wa_init_start(wal, name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   name);
	}
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

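/*
 * Submit a request that copies each RING_FORCE_TO_NONPRIV slot into a
 * freshly allocated results page using MI_STORE_REGISTER_MEM, so that the
 * whitelist programmed into the hardware can be inspected from the CPU.
 */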
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

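/*
 * Compare the RING_NONPRIV slots read back from the hardware against the
 * whitelist the driver believes it programmed; unused slots are expected
 * to hold the RING_NOPID placeholder.
 */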
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct igt_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (i915_terminally_wedged(ctx->i915))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	i915_reset(engine->i915, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return i915_reset_engine(engine, "live_workarounds");
}

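/*
 * Park the engine on a spinning batch inside a throwaway kernel context,
 * so that the reset under test is taken while the engine is busy in a
 * context other than the one being measured.
 */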
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	rq = ERR_PTR(-ENODEV);
	with_intel_runtime_pm(engine->i915, wakeref)
		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);

	kernel_context_close(ctx);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	i915_request_add(rq);

	if (spin && !igt_wait_for_spinner(spin, rq)) {
		pr_err("Spinner failed to start\n");
		err = -ETIMEDOUT;
	}

err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

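/*
 * Verify the whitelist before a reset, perform the reset while the engine
 * is busy on a scratch context, then verify the whitelist again in both
 * the original context and a freshly created one.
 */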
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, name);

	err = igt_spinner_init(&spin, i915);
	if (err)
		return err;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out;

	with_intel_runtime_pm(i915, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out;
	}

	kernel_context_close(ctx);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out;
	}

out:
	kernel_context_close(ctx);
	return err;
}

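/*
 * Allocate and pin a 16-page batch buffer in the context's VM, moved to
 * the WC domain so it can be filled with commands from the CPU.
 */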
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

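/*
 * Model what a write of 'new' does to 'old', given the writable-bit mask
 * 'rsvd' probed from the register. rsvd == 0x0000ffff denotes the usual
 * i915 masked-register convention, where the high 16 bits of the write
 * select which of the low 16 bits are updated. For example (assuming
 * that convention):
 *
 *	reg_write(0x00000000, 0xffff0001, 0x0000ffff) == 0x00000001
 */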
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old = new & rsvd;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

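/*
 * For each whitelisted register: save the original value with SRM, then
 * alternately write each test pattern (and its complement) with LRI and
 * read the result back into the scratch page with SRM, and finally
 * restore the original value with LRM. The readbacks are then checked
 * against reg_write()'s model of the register's writable bits.
 */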
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	/*
	 * Bit patterns to write. The full table was elided in this extract
	 * and the entries below are a representative guess; the only hard
	 * requirement (asserted by the GEM_BUG_ON below) is that the final
	 * entry is all-ones, as that readback is used to detect write
	 * masking.
	 */
	const u32 values[] = {
		0x00000000,
		0x55555555,
		0xaaaaaaaa,
		0xffff0000,
		0x0000ffff,
		0xdeadbeef,
		0xffffffff,
	};
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;

		if (wo_register(engine, reg))
			continue;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage (inverted) */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		i915_gem_chipset_flush(ctx->i915);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);

err_request:
		i915_request_add(rq);
		if (err)
			goto out_batch;

		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			i915_gem_set_wedged(ctx->i915);
			err = -EIO;
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
		if (!rsvd) {
			pr_err("%s: Unable to write to whitelisted register %x\n",
			       engine->name, reg);
			err = -EINVAL;
			goto out_unpin;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, values[v], rsvd);
			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, ~values[v], rsvd);
			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatches between values written to whitelisted register [%x] and the values read back!\n",
			       engine->name, err, reg);

			pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
				engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	wakeref = intel_runtime_pm_get(i915);

	mutex_unlock(&i915->drm.struct_mutex);
	file = mock_file(i915);
	mutex_lock(&i915->drm.struct_mutex);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto out_rpm;
	}

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	mutex_lock(&i915->drm.struct_mutex);
out_rpm:
	intel_runtime_pm_put(i915, wakeref);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS0];
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */

	if (!engine || engine->whitelist.count == 0)
		return 0;

	igt_global_reset_lock(i915);

	if (intel_has_reset_engine(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset,
						   "device");
		if (err)
			goto out;
	}

out:
	igt_global_reset_unlock(i915);
	return err;
}

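/*
 * SRM each whitelisted register into the results page, from within the
 * given context.
 */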
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;

		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	i915_request_add(rq);

	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
		err = -EIO;

	return err;
}

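/*
 * From an unprivileged batch, LRI all-ones into every whitelisted
 * register, dirtying the context under test.
 */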
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
	for (i = 0; i < engine->whitelist.count; i++) {
		*cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	i915_gem_chipset_flush(ctx->i915);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	i915_request_add(rq);
	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
		err = -EIO;

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%04x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, leaving our writes unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%04x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

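/*
 * Compare two snapshots of the whitelisted registers with the given
 * predicate (result_eq or result_neq).
 */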
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelist register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(i915))
		return 0;

	if (!i915->kernel_context->vm)
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_gem_context *c;

		c = kernel_context(i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		client[i].scratch[0] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
	}

	for_each_engine(engine, i915, id) {
		if (!engine->whitelist.count)
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	return err;
}

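/*
 * Check the GT, engine and context workaround lists against the values
 * currently held by the hardware.
 */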
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}
	i915_gem_context_unlock_engines(ctx);

	return ok;
}

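/* Verify that the workarounds are reapplied after a full GPU reset. */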
static int
live_gpu_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(i915);
	wakeref = intel_runtime_pm_get(i915);

	reference_lists_init(i915, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	i915_reset(i915, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	kernel_context_close(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(i915, wakeref);
	igt_global_reset_unlock(i915);

	return ok ? 0 : -ESRCH;
}

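/*
 * Verify that the workarounds survive per-engine resets, both while the
 * engine is idle and while it is busy spinning on a request.
 */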
static int
live_engine_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct igt_spinner spin;
	enum intel_engine_id id;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(i915);
	wakeref = intel_runtime_pm_get(i915);

	reference_lists_init(i915, &lists);

	for_each_engine(engine, i915, id) {
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		i915_reset_engine(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, i915);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			ret = -ETIMEDOUT;
			goto err;
		}

		i915_reset_engine(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}

err:
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(i915, wakeref);
	igt_global_reset_unlock(i915);
	kernel_context_close(ctx);

	igt_flush_test(i915, I915_WAIT_LOCKED);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};
	int err;

	if (i915_terminally_wedged(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}