/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"

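/*
 * Submit @rq and synchronously wait for it to complete and be retired,
 * while deliberately keeping the timeline mutex held across submission
 * (hence the opencoded i915_request_add() below).
 */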
static int request_sync(struct i915_request *rq)
{
	struct intel_timeline *tl = i915_request_timeline(rq);
	long timeout;
	int err = 0;

	intel_timeline_get(tl);
	i915_request_get(rq);

	/* Opencode i915_request_add() so we can keep the timeline locked. */
	__i915_request_commit(rq);
	__i915_request_queue(rq, NULL);

	timeout = i915_request_wait(rq, 0, HZ / 10);
	if (timeout < 0)
		err = timeout;
	else
		i915_request_retire_upto(rq);

	/* The timeline mutex was pinned across submission; release it now. */
	lockdep_unpin_lock(&tl->mutex, rq->cookie);
	mutex_unlock(&tl->mutex);

	i915_request_put(rq);
	intel_timeline_put(tl);

	return err;
}

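/*
 * Wait for (and retire) every request outstanding on the context's
 * timeline, draining tl->last_request until the timeline is idle.
 */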
static int context_sync(struct intel_context *ce)
{
	struct intel_timeline *tl = ce->timeline;
	int err = 0;

	mutex_lock(&tl->mutex);
	do {
		struct dma_fence *fence;
		long timeout;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
			break;

		timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
		if (timeout < 0)
			err = timeout;
		else
			i915_request_retire_upto(to_request(fence));

		dma_fence_put(fence);
	} while (!err);
	mutex_unlock(&tl->mutex);

	return err;
}

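/*
 * Poison the page beyond the claimed context size and check that the HW
 * never writes into it; see the redzone comment below for how this
 * overlaps with the execlists debug checks.
 */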
static int __live_context_size(struct intel_engine_cs *engine,
			       struct i915_gem_context *fixme)
{
	struct intel_context *ce;
	struct i915_request *rq;
	void *vaddr;
	int err;

	ce = intel_context_create(fixme, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_pin(ce);
	if (err)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state->obj,
					i915_coherent_map_type(engine->i915));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		intel_context_unpin(ce);
		goto err;
	}

	/*
	 * Note that execlists also applies a redzone which it checks on
	 * context unpin when debugging. We are using the same location
	 * and same poison value so that our checks overlap. Despite the
	 * redundancy, we want to keep this little selftest so that we
	 * get coverage of any and all submission backends, and we can
	 * always extend this test to ensure we trick the HW into a
	 * compromising position with respect to the various sections
	 * that need to be written into the context state.
	 *
	 * TL;DR: this overlaps with the execlists redzone.
	 */
	vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
	memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);

	rq = intel_context_create_request(ce);
	intel_context_unpin(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = request_sync(rq);
	if (err)
		goto err_unpin;

	/* Force the context switch */
	rq = i915_request_create(engine->kernel_context);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}
	err = request_sync(rq);
	if (err)
		goto err_unpin;

	if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
		pr_err("%s context overwrote trailing red-zone!\n", engine->name);
		err = -EINVAL;
	}

err_unpin:
	i915_gem_object_unpin_map(ce->state->obj);
err:
	intel_context_put(ce);
	return err;
}

static int live_context_size(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *fixme;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check that our context sizes are correct by seeing if the
	 * HW tries to write past the end of one.
	 */

	fixme = kernel_context(gt->i915);
	if (IS_ERR(fixme))
		return PTR_ERR(fixme);

	for_each_engine(engine, gt, id) {
		struct {
			struct drm_i915_gem_object *state;
			void *pinned;
		} saved;

		if (!engine->context_size)
			continue;

		intel_engine_pm_get(engine);

		/*
		 * Hide the old default state -- we lie about the context size
		 * and get confused when the default state is smaller than
		 * expected. For our do-nothing request, inheriting the
		 * active state is sufficient; we are only checking that we
		 * don't use more than we planned.
		 */
		saved.state = fetch_and_zero(&engine->default_state);
		saved.pinned = fetch_and_zero(&engine->pinned_default_state);

		/* Overlaps with the execlists redzone */
		engine->context_size += I915_GTT_PAGE_SIZE;

		err = __live_context_size(engine, fixme);

		engine->context_size -= I915_GTT_PAGE_SIZE;

		engine->pinned_default_state = saved.pinned;
		engine->default_state = saved.state;

		intel_engine_pm_put(engine);

		if (err)
			break;
	}

	kernel_context_close(fixme);
	return err;
}

static int __live_active_context(struct intel_engine_cs *engine,
				 struct i915_gem_context *fixme)
{
	struct intel_context *ce;
	int pass;
	int err;

	/*
	 * We keep active contexts alive until after a subsequent context
	 * switch, as the final write from the context-save will be after
	 * we retire the final request. We track when we unpin the context,
	 * under the presumption that the final pin is from the last request,
	 * and instead of immediately unpinning the context, we add a task
	 * to unpin the context from the next idle-barrier.
	 *
	 * This test makes sure that the context is kept alive until a
	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
	 * with no more outstanding requests).
	 */

	if (intel_engine_pm_is_awake(engine)) {
		pr_err("%s is awake before starting %s!\n",
		       engine->name, __func__);
		return -EINVAL;
	}

	ce = intel_context_create(fixme, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	for (pass = 0; pass <= 2; pass++) {
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err;
		}

		err = request_sync(rq);
		if (err)
			goto err;

		/* Context will be kept active until after an idle-barrier. */
		if (i915_active_is_idle(&ce->active)) {
			pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			goto err;
		}

		if (!intel_engine_pm_is_awake(engine)) {
			pr_err("%s is asleep before idle-barrier\n",
			       engine->name);
			err = -EINVAL;
			goto err;
		}
	}

	/* Now make sure our idle-barriers are flushed */
	err = context_sync(engine->kernel_context);
	if (err)
		goto err;

	if (!i915_active_is_idle(&ce->active)) {
		pr_err("context is still active!\n");
		err = -EINVAL;
	}

	if (intel_engine_pm_is_awake(engine)) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s is still awake after idle-barriers\n",
				  engine->name);
		GEM_TRACE_DUMP();

		err = -EINVAL;
		goto err;
	}

err:
	intel_context_put(ce);
	return err;
}

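/*
 * Run __live_active_context() across all engines, using a live GEM
 * context created on a mock client file.
 */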
static int live_active_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *fixme;
	enum intel_engine_id id;
	struct drm_file *file;
	int err;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	fixme = live_context(gt->i915, file);
	if (IS_ERR(fixme)) {
		err = PTR_ERR(fixme);
		goto out_file;
	}

	for_each_engine(engine, gt, id) {
		err = __live_active_context(engine, fixme);
		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

out_file:
	mock_file_free(gt->i915, file);
	return err;
}

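/*
 * Submit a request on @ce that acts upon @remote's context image (via
 * intel_context_prepare_remote_request()) and synchronously wait for it,
 * keeping @remote pinned for the duration.
 */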
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(remote);
	if (err)
		return err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	err = intel_context_prepare_remote_request(remote, rq);
	if (err) {
		i915_request_add(rq);
		goto unpin;
	}

	err = request_sync(rq);

unpin:
	intel_context_unpin(remote);
	return err;
}

static int __live_remote_context(struct intel_engine_cs *engine,
				 struct i915_gem_context *fixme)
{
	struct intel_context *local, *remote;
	int pass;
	int err;

	/*
	 * Check that our idle barriers do not interfere with normal
	 * activity tracking. In particular, check that operating
	 * on the context image remotely (intel_context_prepare_remote_request),
	 * which inserts foreign fences into intel_context.active, does not
	 * clobber the idle-barrier.
	 */

	remote = intel_context_create(fixme, engine);
	if (IS_ERR(remote))
		return PTR_ERR(remote);

	local = intel_context_create(fixme, engine);
	if (IS_ERR(local)) {
		err = PTR_ERR(local);
		goto err_remote;
	}

	for (pass = 0; pass <= 2; pass++) {
		err = __remote_sync(local, remote);
		if (err)
			break;

		err = __remote_sync(engine->kernel_context, remote);
		if (err)
			break;

		if (i915_active_is_idle(&remote->active)) {
			pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			break;
		}
	}

	intel_context_put(local);
err_remote:
	intel_context_put(remote);
	return err;
}

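/*
 * As live_active_context(), but running __live_remote_context() to check
 * remote operations on another context's image.
 */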
static int live_remote_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *fixme;
	enum intel_engine_id id;
	struct drm_file *file;
	int err;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	fixme = live_context(gt->i915, file);
	if (IS_ERR(fixme)) {
		err = PTR_ERR(fixme);
		goto out_file;
	}

	for_each_engine(engine, gt, id) {
		err = __live_remote_context(engine, fixme);
		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

out_file:
	mock_file_free(gt->i915, file);
	return err;
}

int intel_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_context_size),
		SUBTEST(live_active_context),
		SUBTEST(live_remote_context),
	};
	struct intel_gt *gt = &i915->gt;

	if (intel_gt_is_wedged(gt))
		return 0;

	return intel_gt_live_subtests(tests, gt);
}