/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"
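
/*
 * igt_spinner submits a batch buffer that spins forever on the GPU: it
 * reports a seqno to a "heartbeat" page (hws) and then branches back to
 * its own start, looping until igt_spinner_end() rewrites the batch.
 * Selftests use it to keep an engine busy while exercising preemption,
 * resets and similar paths.
 */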
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(gt->i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
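
/*
 * Each fence context is given its own u32 slot in the hws page, so
 * concurrent spinners can report their seqnos through the same page
 * without trampling each other.
 */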
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;
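
	/*
	 * Emit the spinning batch: store our seqno into the hws page,
	 * apply the caller's arbitration command, then branch back to
	 * the start of this batch so the GPU loops until
	 * igt_spinner_end() overwrites the first dword.
	 */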
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb &&
	    i915_request_timeline(rq)->has_initial_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}
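
/*
 * Stop the spin: the batch is still mapped, so overwriting its first
 * dword with MI_BATCH_BUFFER_END makes the GPU terminate on its next
 * pass around the loop.
 */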
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}
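
/*
 * Poll the hws page for the spinner's seqno: first a short busy-wait
 * (10us), then a sleeping wait of up to a second. Returns true once the
 * spinner is confirmed to be executing on the GPU.
 */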
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}