/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

#include "huge_gem_object.h"
#include "igt_gem_utils.h"
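
/*
 * Selftests for i915 GEM contexts: nop context switching (live_nop_switch),
 * per-context GPU fills verified from the CPU (igt_ctx_exec,
 * igt_shared_ctx_exec, igt_ctx_readonly), SSEU reconfiguration
 * (igt_ctx_sseu), ppGTT isolation between contexts (igt_vm_isolation) and
 * the context barrier callback (mock_context_barrier).
 */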

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

static int live_nop_switch(void *arg)
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct drm_file *file;

	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as a very simple stress test for submission and HW switching
	 * between contexts and checking the resulting request is completed.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)

	file = mock_file(i915);

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);

	for (n = 0; n < nctx; n++) {
		ctx[n] = live_context(i915, file);
			err = PTR_ERR(ctx[n]);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			rq = igt_request_alloc(ctx[n], engine);
		if (i915_request_wait(rq,
			pr_err("Failed to populate %d contexts\n", nctx);
			i915_gem_set_wedged(i915);

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = igt_live_test_begin(&t, i915, __func__, engine->name);

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				rq = igt_request_alloc(ctx[n % nctx], engine);

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * of overhead.
				 */

				i915_request_add(rq);

			if (i915_request_wait(rq,
				pr_err("Switching between %ld contexts timed out\n",
				i915_gem_set_wedged(i915);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);

			if (__igt_timeout(end_time, NULL))

		err = igt_live_test_end(&t);

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));

	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
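
/*
 * gpu_fill_dw() builds a batch of MI_STORE_DWORD_IMM commands, one per page
 * of the target vma, each writing @value at @offset within successive pages.
 * The exact store-dword encoding depends on the hardware generation, as the
 * gen checks in the loop below show.
 */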
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

	vma = i915_vma_instance(obj, vma->vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);

	i915_gem_object_put(obj);
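
/*
 * The test objects come from huge_gem_object(): they report a large dma
 * (virtual) size but are backed by a much smaller set of physical pages
 * that repeat through the mapping. real_page_count() is the number of
 * backing pages, fake_page_count() the number of pages the object appears
 * to have in the GTT.
 */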
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;

static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);

	/* Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			(dw * real_page_count(obj)) << PAGE_SHIFT |
			real_page_count(obj),
		err = PTR_ERR(batch);

	rq = igt_request_alloc(ctx, engine);

	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
			batch->node.start, batch->node.size,

	i915_vma_lock(batch);
	err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	i915_request_add(rq);

	i915_vma_unpin(batch);
	i915_vma_close(batch);

	i915_request_skip(rq, err);

	i915_request_add(rq);

	i915_vma_unpin(batch);
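
/*
 * cpu_fill() seeds every dword of every backing page with @value through a
 * CPU kmap, flushing when the platform has no LLC, so that later GPU writes
 * can be distinguished from the initial pattern.
 */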
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;

	err = i915_gem_object_prepare_write(obj, &need_flush);

	for (n = 0; n < real_page_count(obj); n++) {
		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			drm_clflush_virt_range(map, PAGE_SIZE);

	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
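
/*
 * cpu_check() reads the object back on the CPU: the first @max dwords of
 * each page should have been overwritten by the GPU fills (dword n written
 * by the nth context), while the rest of the page must still hold the
 * STACK_MAGIC poison written by cpu_fill().
 */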
static noinline int cpu_check(struct drm_i915_gem_object *obj,
		unsigned int idx, unsigned int max)
	unsigned int n, m, needs_flush;

	err = i915_gem_object_prepare_read(obj, &needs_flush);

	for (n = 0; n < real_page_count(obj); n++) {
		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
				pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
					__builtin_return_address(0), idx,
					n, real_page_count(obj), m, max,

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
					__builtin_return_address(0), idx, n, m,
					map[m], STACK_MAGIC);

	i915_gem_object_finish_access(obj);

static int file_add_object(struct drm_file *file,
		struct drm_i915_gem_object *obj)
	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);

	i915_gem_object_get(obj);
	obj->base.handle_count++;

static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		struct drm_file *file,
		struct list_head *objects)
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);

	err = cpu_fill(obj, STACK_MAGIC);
		pr_err("Failed to fill object with cpu, err=%d\n",

	list_add_tail(&obj->st_link, objects);

static unsigned long max_dwords(struct drm_i915_gem_object *obj)
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;

static int igt_ctx_exec(void *arg)
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU, making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)

	for_each_engine(engine, i915, id) {
		struct drm_i915_gem_object *obj = NULL;
		unsigned long ncontexts, ndwords, dw;
		struct igt_live_test t;
		struct drm_file *file;
		IGT_TIMEOUT(end_time);

		if (!intel_engine_can_store_dword(engine))

		if (!engine->context_size)
			continue; /* No logical context support in HW */

		file = mock_file(i915);
			return PTR_ERR(file);

		mutex_lock(&i915->drm.struct_mutex);

		err = igt_live_test_begin(&t, i915, __func__, engine->name);

		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			intel_wakeref_t wakeref;

			ctx = live_context(i915, file);

				obj = create_test_object(ctx, file, &objects);

			with_intel_runtime_pm(i915, wakeref)
				err = gpu_fill(obj, ctx, engine, dw);
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
					ndwords, dw, max_dwords(obj),
					engine->name, ctx->hw_id,
					yesno(!!ctx->ppgtt), err);

			if (++dw == max_dwords(obj)) {

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		list_for_each_entry(obj, &objects, st_link) {
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);

		if (igt_live_test_end(&t))
		mutex_unlock(&i915->drm.struct_mutex);

		mock_file_free(i915, file);

static int igt_shared_ctx_exec(void *arg)
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *parent;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_live_test t;
	struct drm_file *file;

	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU, making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)

	file = mock_file(i915);
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	parent = live_context(i915, file);
	if (IS_ERR(parent)) {
		err = PTR_ERR(parent);

	if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */

	err = igt_live_test_begin(&t, i915, __func__, "");

	for_each_engine(engine, i915, id) {
		unsigned long ncontexts, ndwords, dw;
		struct drm_i915_gem_object *obj = NULL;
		IGT_TIMEOUT(end_time);

		if (!intel_engine_can_store_dword(engine))

		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			intel_wakeref_t wakeref;

			ctx = kernel_context(i915);

			__assign_ppgtt(ctx, parent->ppgtt);

				obj = create_test_object(parent, file, &objects);
					kernel_context_close(ctx);

			with_intel_runtime_pm(i915, wakeref)
				err = gpu_fill(obj, ctx, engine, dw);
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
					ndwords, dw, max_dwords(obj),
					engine->name, ctx->hw_id,
					yesno(!!ctx->ppgtt), err);
				kernel_context_close(ctx);

			if (++dw == max_dwords(obj)) {

			kernel_context_close(ctx);

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		list_for_each_entry(obj, &objects, st_link) {
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);

	if (igt_live_test_end(&t))
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
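
/*
 * rpcs_query_batch() emits a batch that copies GEN8_R_PWR_CLK_STATE into
 * the result object with MI_STORE_REGISTER_MEM, so that the slice counts a
 * context is actually running with can be decoded by __read_slice_count().
 */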
static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
	struct drm_i915_gem_object *obj;

	if (INTEL_GEN(vma->vm->i915) < 8)
		return ERR_PTR(-EINVAL);

	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);

	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	*cmd++ = lower_32_bits(vma->node.start);
	*cmd++ = upper_32_bits(vma->node.start);
	*cmd = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vma->vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);

	i915_gem_object_put(obj);

emit_rpcs_query(struct drm_i915_gem_object *obj,
		struct intel_context *ce,
		struct i915_request **rq_out)
	struct i915_request *rq;
	struct i915_vma *batch;
	struct i915_vma *vma;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);

	batch = rpcs_query_batch(vma);
		err = PTR_ERR(batch);

	rq = i915_request_create(ce);

	err = rq->engine->emit_bb_start(rq,
			batch->node.start, batch->node.size,

	i915_vma_lock(batch);
	err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	i915_vma_unpin(batch);
	i915_vma_close(batch);

	*rq_out = i915_request_get(rq);

	i915_request_add(rq);

	i915_request_skip(rq, err);

	i915_request_add(rq);

	i915_vma_unpin(batch);
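
/*
 * Flags for the SSEU subtests: TEST_BUSY keeps the context busy with a
 * spinner while reconfiguring, TEST_RESET performs an engine reset in the
 * middle, and TEST_IDLE waits for the GPU to idle before re-reading the
 * RPCS value. See the phases[] table in igt_ctx_sseu() for the
 * combinations exercised.
 */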
#define TEST_IDLE	BIT(0)
#define TEST_BUSY	BIT(1)
#define TEST_RESET	BIT(2)

__sseu_prepare(struct drm_i915_private *i915,
		struct intel_context *ce,
		struct igt_spinner **spin)
	struct i915_request *rq;

	if (!(flags & (TEST_BUSY | TEST_RESET)))

	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);

	ret = igt_spinner_init(*spin, i915);

	rq = igt_spinner_create_request(*spin,

	i915_request_add(rq);

	if (!igt_wait_for_spinner(*spin, rq)) {
		pr_err("%s: Spinner failed to start!\n", name);

	igt_spinner_end(*spin);
	igt_spinner_fini(*spin);
	kfree(fetch_and_zero(spin));
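
/*
 * __read_slice_count() submits an RPCS query with emit_rpcs_query(), ends
 * any spinner that is blocking it, waits for the request and then decodes
 * the slice count field (GEN8 vs GEN11 layout) from the value written back
 * into the object.
 */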
__read_slice_count(struct drm_i915_private *i915,
		struct intel_context *ce,
		struct drm_i915_gem_object *obj,
		struct igt_spinner *spin,
	struct i915_request *rq = NULL;

	ret = emit_rpcs_query(obj, ce, &rq);

		igt_spinner_end(spin);

	ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);

	buf = i915_gem_object_pin_map(obj, I915_MAP_WB);

	if (INTEL_GEN(i915) >= 11) {
		s_mask = GEN11_RPCS_S_CNT_MASK;
		s_shift = GEN11_RPCS_S_CNT_SHIFT;
		s_mask = GEN8_RPCS_S_CNT_MASK;
		s_shift = GEN8_RPCS_S_CNT_SHIFT;

	cnt = (val & s_mask) >> s_shift;

	i915_gem_object_unpin_map(obj);

__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
		const char *prefix, const char *suffix)
	if (slices == expected)

		pr_err("%s: %s read slice count failed with %d%s\n",
			name, prefix, slices, suffix);

		pr_err("%s: %s slice count %d is not %u%s\n",
			name, prefix, slices, expected, suffix);

	pr_info("RPCS=0x%x; %u%sx%u%s\n",
		(rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
		(rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
		(rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");

__sseu_finish(struct drm_i915_private *i915,
		struct intel_context *ce,
		struct drm_i915_gem_object *obj,
		unsigned int expected,
		struct igt_spinner *spin)
	unsigned int slices = hweight32(ce->engine->sseu.slice_mask);

	if (flags & TEST_RESET) {
		ret = i915_reset_engine(ce->engine, "sseu");

	ret = __read_slice_count(i915, ce, obj,
			flags & TEST_RESET ? NULL : spin, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");

	ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

		igt_spinner_end(spin);

	if ((flags & TEST_IDLE) && ret == 0) {
		ret = i915_gem_wait_for_idle(i915,
				MAX_SCHEDULE_TIMEOUT);

		ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
		ret = __check_rpcs(name, rpcs, ret, expected,
				"Context", " after idle!");

__sseu_test(struct drm_i915_private *i915,
		struct intel_context *ce,
		struct drm_i915_gem_object *obj,
		struct intel_sseu sseu)
	struct igt_spinner *spin = NULL;

	ret = __sseu_prepare(i915, name, flags, ce, &spin);

	ret = __intel_context_reconfigure_sseu(ce, sseu);

	ret = __sseu_finish(i915, name, flags, ce, obj,
			hweight32(sseu.slice_mask), spin);

		igt_spinner_end(spin);
		igt_spinner_fini(spin);
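
/*
 * __igt_ctx_sseu() creates a context on the render engine and toggles it
 * between the default SSEU configuration and a power-gated one (a single
 * slice with half the subslices), checking the RPCS state after each
 * switch under the behaviour selected by the TEST_* flags.
 */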
__igt_ctx_sseu(struct drm_i915_private *i915,
	struct intel_engine_cs *engine = i915->engine[RCS0];
	struct intel_sseu default_sseu = engine->sseu;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct intel_sseu pg_sseu;
	intel_wakeref_t wakeref;
	struct drm_file *file;

	if (INTEL_GEN(i915) < 9)

	if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)

	if (hweight32(default_sseu.slice_mask) < 2)

	/*
	 * Gen11 VME friendly power-gated configuration with half enabled
	 * sub-slices.
	 */
	pg_sseu = default_sseu;
	pg_sseu.slice_mask = 1;
	pg_sseu.subslice_mask =
		~(~0 << (hweight32(default_sseu.subslice_mask) / 2));

	pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
		name, flags, hweight32(default_sseu.slice_mask),
		hweight32(pg_sseu.slice_mask));

	file = mock_file(i915);
		return PTR_ERR(file);

	if (flags & TEST_RESET)
		igt_global_reset_lock(i915);

	mutex_lock(&i915->drm.struct_mutex);

	ctx = live_context(i915, file);

	i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);

	wakeref = intel_runtime_pm_get(i915);

	ce = i915_gem_context_get_engine(ctx, RCS0);

	ret = intel_context_pin(ce);

	/* First set the default mask. */
	ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);

	/* Then set a power-gated configuration. */
	ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);

	/* Back to defaults. */
	ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);

	/* One last power-gated configuration for the road. */
	ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);

	if (igt_flush_test(i915, I915_WAIT_LOCKED))

	intel_context_unpin(ce);

	intel_context_put(ce);

	intel_runtime_pm_put(i915, wakeref);
	i915_gem_object_put(obj);

	mutex_unlock(&i915->drm.struct_mutex);

	if (flags & TEST_RESET)
		igt_global_reset_unlock(i915);

	mock_file_free(i915, file);

		pr_err("%s: Failed with %d!\n", name, ret);

static int igt_ctx_sseu(void *arg)
	} *phase, phases[] = {
		{ .name = "basic", .flags = 0 },
		{ .name = "idle", .flags = TEST_IDLE },
		{ .name = "busy", .flags = TEST_BUSY },
		{ .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
		{ .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
		{ .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },

	for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
		ret = __igt_ctx_sseu(arg, phase->name, phase->flags);

static int igt_ctx_readonly(void *arg)
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	unsigned long idx, ndwords, dw;
	struct igt_live_test t;
	struct drm_file *file;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = igt_live_test_begin(&t, i915, __func__, "");

	ctx = live_context(i915, file);

	ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
	if (!ppgtt || !ppgtt->vm.has_read_only) {

	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;

		for_each_engine(engine, i915, id) {
			intel_wakeref_t wakeref;

			if (!intel_engine_can_store_dword(engine))

				obj = create_test_object(ctx, file, &objects);

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);

			with_intel_runtime_pm(i915, wakeref)
				err = gpu_fill(obj, ctx, engine, dw);
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
					ndwords, dw, max_dwords(obj),
					engine->name, ctx->hw_id,
					yesno(!!ctx->ppgtt), err);

			if (++dw == max_dwords(obj)) {

	pr_info("Submitted %lu dwords (across %u engines)\n",
		ndwords, RUNTIME_INFO(i915)->num_engines);

	list_for_each_entry(obj, &objects, st_link) {
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		if (i915_gem_object_is_readonly(obj))

		err = cpu_check(obj, idx++, num_writes);

	if (igt_live_test_end(&t))
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
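
/*
 * check_scratch() verifies that the randomly chosen scratch offset does not
 * collide with anything already allocated in the context's ppGTT.
 */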
static int check_scratch(struct i915_gem_context *ctx, u64 offset)
	struct drm_mm_node *node =
		__drm_mm_interval_first(&ctx->ppgtt->vm.mm,
			offset, offset + sizeof(u32) - 1);
	if (!node || node->start > offset)

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
		upper_32_bits(offset), lower_32_bits(offset));
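
/*
 * write_to_scratch() builds and submits a one-page batch that stores @value
 * at @offset in the context's ppGTT using MI_STORE_DWORD_IMM.
 */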
static int write_to_scratch(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine,
		u64 offset, u32 value)
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *vma;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);

	*cmd = MI_BATCH_BUFFER_END;
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);

	err = check_scratch(ctx, offset);

	rq = igt_request_alloc(ctx, engine);

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);

	err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	i915_request_skip(rq, err);

	i915_request_add(rq);

	i915_vma_unpin(vma);

	i915_gem_object_put(obj);
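
/*
 * read_from_scratch() loads the dword at @offset into a GPR with
 * MI_LOAD_REGISTER_MEM and stores it back into the batch object with
 * MI_STORE_REGISTER_MEM, so the value visible through this context's ppGTT
 * can be read back by the CPU and returned in *value.
 */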
static int read_from_scratch(struct i915_gem_context *ctx,
		struct intel_engine_cs *engine,
		u64 offset, u32 *value)
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);

	memset(cmd, POISON_INUSE, PAGE_SIZE);
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = MI_STORE_REGISTER_MEM;
	*cmd = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);

	err = check_scratch(ctx, offset);

	rq = igt_request_alloc(ctx, engine);

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);
	i915_gem_object_put(obj);

	i915_request_skip(rq, err);

	i915_request_add(rq);

	i915_vma_unpin(vma);

	i915_gem_object_put(obj);

static int igt_vm_isolation(void *arg)
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct drm_file *file;
	I915_RND_STATE(prng);
	unsigned long count;

	if (INTEL_GEN(i915) < 7)

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = igt_live_test_begin(&t, i915, __func__, "");

	ctx_a = live_context(i915, file);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);

	ctx_b = live_context(i915, file);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);

	/* We can only test vm isolation, if the VMs are distinct */
	if (ctx_a->ppgtt == ctx_b->ppgtt)

	vm_total = ctx_a->ppgtt->vm.total;
	GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
	vm_total -= I915_GTT_PAGE_SIZE;

	wakeref = intel_runtime_pm_get(i915);

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;

			div64_u64_rem(i915_prandom_u64_state(&prng),
			offset &= -sizeof(u32);
			offset += I915_GTT_PAGE_SIZE;

			err = write_to_scratch(ctx_a, engine,
					offset, 0xdeadbeef);
			err = read_from_scratch(ctx_b, engine,

				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
					engine->name, value,
					upper_32_bits(offset),
					lower_32_bits(offset),

	pr_info("Checked %lu scratch offsets across %d engines\n",
		count, RUNTIME_INFO(i915)->num_engines);

	intel_runtime_pm_put(i915, wakeref);

	if (igt_live_test_end(&t))
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);

static __maybe_unused const char *
__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	if (engines == ALL_ENGINES)

	for_each_engine_masked(engine, i915, engines, tmp)
		return engine->name;

static void mock_barrier_task(void *data)
	unsigned int *counter = data;

static int mock_context_barrier(void *arg)
#define pr_fmt(x) "context_barrier_task():" # x
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	unsigned int counter;

	/*
	 * The context barrier provides us with a callback after it emits
	 * a request; useful for retiring old state after loading new.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	ctx = mock_context(i915, "mock");

	err = context_barrier_task(ctx, 0,
			NULL, mock_barrier_task, &counter);
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		pr_err("Did not retire immediately with 0 engines\n");

	err = context_barrier_task(ctx, ALL_ENGINES,
			NULL, mock_barrier_task, &counter);
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		pr_err("Did not retire immediately for all unused engines\n");

	rq = igt_request_alloc(ctx, i915->engine[RCS0]);
		pr_err("Request allocation failed!\n");

	i915_request_add(rq);

	context_barrier_inject_fault = BIT(RCS0);
	err = context_barrier_task(ctx, ALL_ENGINES,
			NULL, mock_barrier_task, &counter);
	context_barrier_inject_fault = 0;
		pr_err("Did not hit fault injection!\n");
		pr_err("Invoked callback on error!\n");

	err = context_barrier_task(ctx, ALL_ENGINES,
			NULL, mock_barrier_task, &counter);
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
	mock_device_flush(i915);
		pr_err("Did not retire on each active engine\n");

	mock_context_close(ctx);

	mutex_unlock(&i915->drm.struct_mutex);

int i915_gem_context_mock_selftests(void)
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_context_barrier),
	struct drm_i915_private *i915;

	i915 = mock_gem_device();

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);

int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_ctx_sseu),
		SUBTEST(igt_shared_ctx_exec),
		SUBTEST(igt_vm_isolation),

	if (i915_terminally_wedged(dev_priv))

	return i915_subtests(tests, dev_priv);