/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
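/*
 * Submit @rq and synchronously wait for it to complete, then retire all
 * requests on its timeline up to and including @rq. Used throughout these
 * selftests to serialise each step before inspecting engine/context state.
 */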
static int request_sync(struct i915_request *rq)
{
        long timeout;
        int err = 0;

        i915_request_get(rq);

        i915_request_add(rq);
        timeout = i915_request_wait(rq, 0, HZ / 10);
        if (timeout < 0) {
                err = timeout;
        } else {
                mutex_lock(&rq->timeline->mutex);
                i915_request_retire_upto(rq);
                mutex_unlock(&rq->timeline->mutex);
        }

        i915_request_put(rq);

        return err;
}
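/*
 * Wait for (and retire up to) the last request emitted on @ce's timeline,
 * if any. This is how we flush the idle-barriers queued on the kernel
 * context below.
 */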
static int context_sync(struct intel_context *ce)
{
        struct intel_timeline *tl = ce->timeline;
        int err = 0;

        mutex_lock(&tl->mutex);
        do {
                struct i915_request *rq;
                long timeout;

                rcu_read_lock();
                rq = rcu_dereference(tl->last_request.request);
                if (rq)
                        rq = i915_request_get_rcu(rq);
                rcu_read_unlock();
                if (!rq)
                        break;

                timeout = i915_request_wait(rq, 0, HZ / 10);
                if (timeout < 0)
                        err = timeout;
                else
                        i915_request_retire_upto(rq);

                i915_request_put(rq);
        } while (0);
        mutex_unlock(&tl->mutex);

        return err;
}
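/*
 * Poison one extra page beyond the real context image and check that the
 * HW never writes into it. The caller inflates engine->context_size so
 * that this trailing page is allocated and mapped as part of ce->state.
 */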
static int __live_context_size(struct intel_engine_cs *engine,
                               struct i915_gem_context *fixme)
{
        struct intel_context *ce;
        struct i915_request *rq;
        void *vaddr;
        int err;

        ce = intel_context_create(fixme, engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        err = intel_context_pin(ce);
        if (err)
                goto err;

        vaddr = i915_gem_object_pin_map(ce->state->obj,
                                        i915_coherent_map_type(engine->i915));
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                intel_context_unpin(ce);
                goto err;
        }

        /*
         * Note that execlists also applies a redzone which it checks on
         * context unpin when debugging. We are using the same location
         * and same poison value so that our checks overlap. Despite the
         * redundancy, we want to keep this little selftest so that we
         * get coverage of any and all submission backends, and we can
         * always extend this test to ensure we trick the HW into a
         * compromising position wrt the various sections that need
         * to be written into the context state.
         *
         * TLDR; this overlaps with the execlists redzone.
         */
        if (HAS_EXECLISTS(engine->i915))
                vaddr += LRC_HEADER_PAGES * PAGE_SIZE;

        vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
        memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);

        rq = intel_context_create_request(ce);
        intel_context_unpin(ce); /* the request, if created, holds its own pin */
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        err = request_sync(rq);
        if (err)
                goto err_unpin;

        /* Force the context switch */
        rq = i915_request_create(engine->kernel_context);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }
        err = request_sync(rq);
        if (err)
                goto err_unpin;

        if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
                pr_err("%s context overwrote trailing red-zone!\n", engine->name);
                err = -EINVAL;
        }

err_unpin:
        i915_gem_object_unpin_map(ce->state->obj);
err:
        intel_context_put(ce);
        return err;
}
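/*
 * Driver for __live_context_size(): runs it once per engine with
 * engine->context_size temporarily inflated by one redzone page.
 */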
static int live_context_size(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *fixme;
        enum intel_engine_id id;
        int err = 0;

        /*
         * Check that our context sizes are correct by seeing if the
         * HW tries to write past the end of one.
         */

        mutex_lock(&gt->i915->drm.struct_mutex);

        fixme = kernel_context(gt->i915);
        if (IS_ERR(fixme)) {
                err = PTR_ERR(fixme);
                goto unlock;
        }

        for_each_engine(engine, gt->i915, id) {
                struct {
                        struct drm_i915_gem_object *state;
                        void *pinned;
                } saved;

                if (!engine->context_size)
                        continue;

                intel_engine_pm_get(engine);

                /*
                 * Hide the old default state -- we lie about the context size
                 * and get confused when the default state is smaller than
                 * expected. For our do nothing request, inheriting the
                 * active state is sufficient, we are only checking that we
                 * don't use more than we planned.
                 */
                saved.state = fetch_and_zero(&engine->default_state);
                saved.pinned = fetch_and_zero(&engine->pinned_default_state);

                /* Overlaps with the execlists redzone */
                engine->context_size += I915_GTT_PAGE_SIZE;

                err = __live_context_size(engine, fixme);

                engine->context_size -= I915_GTT_PAGE_SIZE;

                engine->pinned_default_state = saved.pinned;
                engine->default_state = saved.state;

                intel_engine_pm_put(engine);

                if (err)
                        break;
        }

        kernel_context_close(fixme);
unlock:
        mutex_unlock(&gt->i915->drm.struct_mutex);

        return err;
}
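/*
 * Each pass of the loop below submits a nop request and then verifies
 * that the context stays active (and the engine awake) until the
 * idle-barriers are explicitly flushed afterwards.
 */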
static int __live_active_context(struct intel_engine_cs *engine,
                                 struct i915_gem_context *fixme)
{
        struct intel_context *ce;
        int pass;
        int err;

        /*
         * We keep active contexts alive until after a subsequent context
         * switch as the final write from the context-save will be after
         * we retire the final request. We track when we unpin the context,
         * under the presumption that the final pin is from the last request,
         * and instead of immediately unpinning the context, we add a task
         * to unpin the context from the next idle-barrier.
         *
         * This test makes sure that the context is kept alive until a
         * subsequent idle-barrier (emitted when the engine wakeref hits 0
         * with no more outstanding requests).
         */

        if (intel_engine_pm_is_awake(engine)) {
                pr_err("%s is awake before starting %s!\n",
                       engine->name, __func__);
                return -EINVAL;
        }

        ce = intel_context_create(fixme, engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        for (pass = 0; pass <= 2; pass++) {
                struct i915_request *rq;

                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err;
                }

                err = request_sync(rq);
                if (err)
                        goto err;

                /* Context will be kept active until after an idle-barrier. */
                if (i915_active_is_idle(&ce->active)) {
                        pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
                               engine->name, pass);
                        err = -EINVAL;
                        goto err;
                }

                if (!intel_engine_pm_is_awake(engine)) {
                        pr_err("%s is asleep before idle-barrier\n",
                               engine->name);
                        err = -EINVAL;
                        goto err;
                }
        }

        /* Now make sure our idle-barriers are flushed */
        err = context_sync(engine->kernel_context);
        if (err)
                goto err;

        if (!i915_active_is_idle(&ce->active)) {
                pr_err("context is still active!\n");
                err = -EINVAL;
        }

        if (intel_engine_pm_is_awake(engine)) {
                struct drm_printer p = drm_debug_printer(__func__);

                intel_engine_dump(engine, &p,
                                  "%s is still awake after idle-barriers\n",
                                  engine->name);
                GEM_TRACE_DUMP();

                err = -EINVAL;
                goto err;
        }

err:
        intel_context_put(ce);
        return err;
}
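/*
 * Run __live_active_context() on every engine, using a live GEM context
 * created against a mock drm_file (cleaned up via mock_file_free()).
 */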
static int live_active_context(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *fixme;
        enum intel_engine_id id;
        struct drm_file *file;
        int err = 0;

        file = mock_file(gt->i915);
        if (IS_ERR(file))
                return PTR_ERR(file);

        mutex_lock(&gt->i915->drm.struct_mutex);

        fixme = live_context(gt->i915, file);
        if (IS_ERR(fixme)) {
                err = PTR_ERR(fixme);
                goto unlock;
        }

        for_each_engine(engine, gt->i915, id) {
                err = __live_active_context(engine, fixme);
                if (err)
                        break;

                err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
                if (err)
                        break;
        }

unlock:
        mutex_unlock(&gt->i915->drm.struct_mutex);
        mock_file_free(gt->i915, file);
        return err;
}
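/*
 * Submit a request on @ce that also operates on @remote's context image
 * via intel_context_prepare_remote_request() -- i.e. the request's fence
 * is tracked inside remote->active -- then wait for it to complete.
 */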
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
        struct i915_request *rq;
        int err;

        err = intel_context_pin(remote);
        if (err)
                return err;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto unpin;
        }

        err = intel_context_prepare_remote_request(remote, rq);
        if (err) {
                i915_request_add(rq);
                goto unpin;
        }

        err = request_sync(rq);

unpin:
        intel_context_unpin(remote);
        return err;
}
static int __live_remote_context(struct intel_engine_cs *engine,
                                 struct i915_gem_context *fixme)
{
        struct intel_context *local, *remote;
        int pass;
        int err;

        /*
         * Check that our idle barriers do not interfere with normal
         * activity tracking. In particular, check that operating
         * on the context image remotely (intel_context_prepare_remote_request),
         * which inserts foreign fences into intel_context.active, does not
         * clobber the idle-barrier.
         */

        remote = intel_context_create(fixme, engine);
        if (IS_ERR(remote))
                return PTR_ERR(remote);

        local = intel_context_create(fixme, engine);
        if (IS_ERR(local)) {
                err = PTR_ERR(local);
                goto err_remote;
        }

        for (pass = 0; pass <= 2; pass++) {
                err = __remote_sync(local, remote);
                if (err)
                        break;

                err = __remote_sync(engine->kernel_context, remote);
                if (err)
                        break;

                if (i915_active_is_idle(&remote->active)) {
                        pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
                               engine->name, pass);
                        err = -EINVAL;
                        break;
                }
        }

        intel_context_put(local);
err_remote:
        intel_context_put(remote);
        return err;
}
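/*
 * As live_active_context(), but exercising the remote-request path on
 * every engine.
 */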
static int live_remote_context(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *fixme;
        enum intel_engine_id id;
        struct drm_file *file;
        int err = 0;

        file = mock_file(gt->i915);
        if (IS_ERR(file))
                return PTR_ERR(file);

        mutex_lock(&gt->i915->drm.struct_mutex);

        fixme = live_context(gt->i915, file);
        if (IS_ERR(fixme)) {
                err = PTR_ERR(fixme);
                goto unlock;
        }

        for_each_engine(engine, gt->i915, id) {
                err = __live_remote_context(engine, fixme);
                if (err)
                        break;

                err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
                if (err)
                        break;
        }

unlock:
        mutex_unlock(&gt->i915->drm.struct_mutex);
        mock_file_free(gt->i915, file);
        return err;
}
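/*
 * Entry point for the live selftest harness (typically invoked via the
 * i915.live_selftests module parameter, e.g. through igt's i915_selftest
 * wrapper). Skipped when the GPU is already wedged.
 */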
int intel_context_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_context_size),
                SUBTEST(live_active_context),
                SUBTEST(live_remote_context),
        };
        struct intel_gt *gt = &i915->gt;

        if (intel_gt_is_wedged(gt))
                return 0;

        return intel_gt_live_subtests(tests, gt);
}