drm/i915/selftests: Spin on all engines simultaneously
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ee8450b871da97185b1c769d3b674308a5e5d19a..e8a58fe49c3963ccc81ec1a679650d13cbe2d4ba 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -15,8 +15,6 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
        void *vaddr;
        int err;
 
-       GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
-
        memset(spin, 0, sizeof(*spin));
        spin->gt = gt;
 
@@ -95,11 +93,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
        struct intel_engine_cs *engine = ce->engine;
        struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
+       unsigned int flags;
        u32 *batch;
        int err;
 
        GEM_BUG_ON(spin->gt != ce->vm->gt);
 
+       if (!intel_engine_can_store_dword(ce->engine))
+               return ERR_PTR(-ENODEV);
+
        vma = i915_vma_instance(spin->obj, ce->vm, NULL);
        if (IS_ERR(vma))
                return ERR_CAST(vma);
@@ -132,16 +134,37 @@ igt_spinner_create_request(struct igt_spinner *spin,
 
        batch = spin->batch;
 
-       *batch++ = MI_STORE_DWORD_IMM_GEN4;
-       *batch++ = lower_32_bits(hws_address(hws, rq));
-       *batch++ = upper_32_bits(hws_address(hws, rq));
+       if (INTEL_GEN(rq->i915) >= 8) {
+               *batch++ = MI_STORE_DWORD_IMM_GEN4;
+               *batch++ = lower_32_bits(hws_address(hws, rq));
+               *batch++ = upper_32_bits(hws_address(hws, rq));
+       } else if (INTEL_GEN(rq->i915) >= 6) {
+               *batch++ = MI_STORE_DWORD_IMM_GEN4;
+               *batch++ = 0;
+               *batch++ = hws_address(hws, rq);
+       } else if (INTEL_GEN(rq->i915) >= 4) {
+               *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+               *batch++ = 0;
+               *batch++ = hws_address(hws, rq);
+       } else {
+               *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+               *batch++ = hws_address(hws, rq);
+       }
        *batch++ = rq->fence.seqno;
 
        *batch++ = arbitration_command;
 
-       *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+       if (INTEL_GEN(rq->i915) >= 8)
+               *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+       else if (IS_HASWELL(rq->i915))
+               *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
+       else if (INTEL_GEN(rq->i915) >= 6)
+               *batch++ = MI_BATCH_BUFFER_START;
+       else
+               *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
        *batch++ = lower_32_bits(vma->node.start);
        *batch++ = upper_32_bits(vma->node.start);
+
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */
 
        intel_gt_chipset_flush(engine->gt);
@@ -153,7 +176,10 @@ igt_spinner_create_request(struct igt_spinner *spin,
                        goto cancel_rq;
        }
 
-       err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+       flags = 0;
+       if (INTEL_GEN(rq->i915) <= 5)
+               flags |= I915_DISPATCH_SECURE;
+       err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
 cancel_rq:
        if (err) {
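
With the gen8-only GEM_BUG_ON gone and the new -ENODEV return in place, a caller can treat "engine cannot store a dword" as a soft skip rather than a failure. The following is a minimal sketch of that calling pattern, not code from this patch: it follows the usual i915 selftest idiom, assumes an already-initialised spinner and a pinned intel_context, the helper name wait_for_spin is invented for illustration, and MI_ARB_CHECK is just one valid arbitration command.

static int wait_for_spin(struct igt_spinner *spin, struct intel_context *ce)
{
	struct i915_request *rq;
	int err = 0;

	/*
	 * igt_spinner_create_request() now returns -ENODEV when the engine
	 * cannot store a dword; treat that as "skip", not as an error.
	 */
	rq = igt_spinner_create_request(spin, ce, MI_ARB_CHECK);
	if (IS_ERR(rq))
		return PTR_ERR(rq) == -ENODEV ? 0 : PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	/* Wait for the MI_STORE_DWORD breadcrumb emitted by the spinner batch. */
	if (!igt_wait_for_spinner(spin, rq))
		err = -ETIME;

	igt_spinner_end(spin);
	i915_request_put(rq);
	return err;
}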