2 * SPDX-License-Identifier: MIT
4 * Copyright © 2018 Intel Corporation
7 #include <linux/sort.h>
11 #include "intel_gt_requests.h"
12 #include "i915_selftest.h"
/*
 * Wait (interruptibly, up to HZ/2) for the timeline's most recent request.
 * NOTE(review): this chunk is a lossy extraction -- the NULL-fence check,
 * dma_fence_put() and return paths appear elided; confirm against upstream.
 */
14 static int timeline_sync(struct intel_timeline *tl)
16 	struct dma_fence *fence;
19 	fence = i915_active_fence_get(&tl->last_request);
23 	timeout = dma_fence_wait_timeout(fence, true, HZ / 2);
/* Synchronize with the engine's kernel context timeline, i.e. wait for
 * any barrier/heartbeat request emitted on the engine to complete. */
31 static int engine_sync_barrier(struct intel_engine_cs *engine)
33 	return timeline_sync(engine->kernel_context->timeline);
37 struct i915_active active;
/* i915_active activation callback: take a reference on the owning pulse
 * so it stays alive while the active tracker has outstanding work. */
41 static int pulse_active(struct i915_active *active)
43 	kref_get(&container_of(active, struct pulse, active)->kref);
/* kref release callback: final teardown of the pulse allocation. */
47 static void pulse_free(struct kref *kref)
49 	kfree(container_of(kref, struct pulse, kref));
/* Drop a pulse reference; frees via pulse_free() on the last put. */
52 static void pulse_put(struct pulse *p)
54 	kref_put(&p->kref, pulse_free);
/* i915_active retirement callback: release the reference taken in
 * pulse_active() once the tracked work has completed. */
57 static void pulse_retire(struct i915_active *active)
59 	pulse_put(container_of(active, struct pulse, active));
/*
 * Allocate and initialise a pulse with its i915_active callbacks.
 * NOTE(review): the NULL check after kmalloc(), kref_init() and the
 * return statement appear elided from this extraction.
 */
62 static struct pulse *pulse_create(void)
66 	p = kmalloc(sizeof(*p), GFP_KERNEL);
71 	i915_active_init(&p->active, pulse_active, pulse_retire);
/* Flush the i915_active mutex, serialising with a concurrent retirement
 * callback before we inspect the pulse's final state. */
76 static void pulse_unlock_wait(struct pulse *p)
78 	i915_active_unlock_wait(&p->active);
/*
 * Core idle-barrier test: attach preallocated idle barriers to a pulse's
 * i915_active, invoke @fn (either a barrier flush or a heartbeat pulse)
 * and verify the barriers are consumed and the pulse retires to idle.
 * NOTE(review): lossy extraction -- pulse_create(), the fn(engine) call,
 * error unwinding and the final return are elided; confirm upstream.
 */
81 static int __live_idle_pulse(struct intel_engine_cs *engine,
82 			     int (*fn)(struct intel_engine_cs *cs))
/* Caller must hold an engine-pm wakeref (see live_idle_flush/pulse). */
87 	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
93 	err = i915_active_acquire(&p->active);
/* Stash idle barriers on the engine for the active tracker ... */
97 	err = i915_active_acquire_preallocate_barrier(&p->active, engine);
99 		i915_active_release(&p->active);
/* ... then commit them onto engine->barrier_tasks. */
103 	i915_active_acquire_barrier(&p->active);
104 	i915_active_release(&p->active);
/* Barriers are queued but unconsumed: pulse must not yet be idle. */
106 	GEM_BUG_ON(i915_active_is_idle(&p->active));
107 	GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
/* After fn() the engine must have drained its barrier list. */
113 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
115 	if (engine_sync_barrier(engine)) {
116 		struct drm_printer m = drm_err_printer("pulse");
118 		pr_err("%s: no heartbeat pulse?\n", engine->name);
119 		intel_engine_dump(engine, &m, "%s", engine->name);
/* The pulse itself must not have disturbed engine->serial bookkeeping. */
125 	GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial);
127 	pulse_unlock_wait(p); /* synchronize with the retirement callback */
129 	if (!i915_active_is_idle(&p->active)) {
130 		struct drm_printer m = drm_err_printer("pulse");
132 		pr_err("%s: heartbeat pulse did not flush idle tasks\n",
134 		i915_active_print(&p->active, &m);
/*
 * Selftest: verify idle barriers can be flushed explicitly via
 * intel_engine_flush_barriers on every engine of the GT.
 * NOTE(review): the err declaration, loop error-exit and final return
 * appear elided from this extraction.
 */
145 static int live_idle_flush(void *arg)
147 	struct intel_gt *gt = arg;
148 	struct intel_engine_cs *engine;
149 	enum intel_engine_id id;
152 	/* Check that we can flush the idle barriers */
154 	for_each_engine(engine, gt, id) {
/* Hold a pm wakeref across the pulse -- __live_idle_pulse asserts it. */
155 		intel_engine_pm_get(engine);
156 		err = __live_idle_pulse(engine, intel_engine_flush_barriers);
157 		intel_engine_pm_put(engine);
/*
 * Selftest: verify a heartbeat pulse (intel_engine_pulse) flushes the
 * idle barriers on every engine of the GT.
 * NOTE(review): trailing loop body/return elided from this extraction;
 * -ENODEV from intel_engine_pulse is tolerated (pulse unsupported).
 */
165 static int live_idle_pulse(void *arg)
167 	struct intel_gt *gt = arg;
168 	struct intel_engine_cs *engine;
169 	enum intel_engine_id id;
172 	/* Check that heartbeat pulses flush the idle barriers */
174 	for_each_engine(engine, gt, id) {
175 		intel_engine_pm_get(engine);
176 		err = __live_idle_pulse(engine, intel_engine_pulse);
177 		intel_engine_pm_put(engine);
178 		if (err && err != -ENODEV)
/* sort() comparator for u32 samples (ascending); the subtraction /
 * return line is elided from this extraction. */
187 static int cmp_u32(const void *_a, const void *_b)
189 	const u32 *a = _a, *b = _b;
/*
 * Measure heartbeat latency: set the interval to 1ms, provoke ticks, and
 * time how long each systole takes to be replaced, checking the median
 * against the expected worst-case jiffies budget.
 * NOTE(review): lossy extraction -- the times[] array, ktime t0/t1
 * sampling, several error paths and the return are elided.
 */
194 static int __live_heartbeat_fast(struct intel_engine_cs *engine)
196 	struct intel_context *ce;
197 	struct i915_request *rq;
203 	ce = intel_context_create(engine);
207 	intel_engine_pm_get(engine);
/* Shortest possible heartbeat interval: 1ms. */
209 	err = intel_engine_set_heartbeat(engine, 1);
213 	for (i = 0; i < ARRAY_SIZE(times); i++) {
214 		/* Manufacture a tick */
/* Drain any in-flight systole before timing a fresh one. */
216 		while (READ_ONCE(engine->heartbeat.systole))
217 			flush_delayed_work(&engine->heartbeat.work);
218 
219 		engine->serial++; /* quick, pretend we are not idle! */
220 		flush_delayed_work(&engine->heartbeat.work);
221 		if (!delayed_work_pending(&engine->heartbeat.work)) {
222 			pr_err("%s: heartbeat did not start\n",
/* Grab the current systole request under RCU before timing. */
229 		rq = READ_ONCE(engine->heartbeat.systole);
231 		rq = i915_request_get_rcu(rq);
/* Busy-wait for the next beat to replace the systole. */
236 		while (rq == READ_ONCE(engine->heartbeat.systole))
237 			yield(); /* work is on the local cpu! */
240 		i915_request_put(rq);
241 		times[i] = ktime_us_delta(t1, t0);
244 	sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL);
246 	pr_info("%s: Heartbeat delay: %uus [%u, %u]\n",
248 		times[ARRAY_SIZE(times) / 2],
250 		times[ARRAY_SIZE(times) - 1]);
252 	/* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
/* Compare the median sample against the 6-jiffy budget above. */
253 	if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
254 		pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
256 		       times[ARRAY_SIZE(times) / 2],
257 		       jiffies_to_usecs(6));
/* Restore the configured default interval before releasing pm. */
261 	intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
263 	intel_engine_pm_put(engine);
264 	intel_context_put(ce);
/*
 * Selftest wrapper: run __live_heartbeat_fast on every engine.
 * Skipped entirely when the heartbeat is compiled out
 * (CONFIG_DRM_I915_HEARTBEAT_INTERVAL == 0).
 * NOTE(review): err declaration, skip-return and loop exit elided.
 */
268 static int live_heartbeat_fast(void *arg)
270 	struct intel_gt *gt = arg;
271 	struct intel_engine_cs *engine;
272 	enum intel_engine_id id;
275 	/* Check that the heartbeat ticks at the desired rate. */
276 	if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
279 	for_each_engine(engine, gt, id) {
280 		err = __live_heartbeat_fast(engine);
/*
 * Verify the heartbeat can be disabled: confirm it is running, set the
 * interval to 0, then check no work remains pending and no systole
 * request is still allocated.
 * NOTE(review): lossy extraction -- err declaration, the engine->serial++
 * nudges before each flush, error labels and return are elided.
 */
288 static int __live_heartbeat_off(struct intel_engine_cs *engine)
292 	intel_engine_pm_get(engine);
295 	flush_delayed_work(&engine->heartbeat.work);
/* Heartbeat must be ticking before we try to turn it off. */
296 	if (!delayed_work_pending(&engine->heartbeat.work)) {
297 		pr_err("%s: heartbeat not running\n",
/* interval 0 == disable the heartbeat. */
303 	err = intel_engine_set_heartbeat(engine, 0);
308 	flush_delayed_work(&engine->heartbeat.work);
309 	if (delayed_work_pending(&engine->heartbeat.work)) {
310 		pr_err("%s: heartbeat still running\n",
/* The final beat should also have released its request. */
316 	if (READ_ONCE(engine->heartbeat.systole)) {
317 		pr_err("%s: heartbeat still allocated\n",
/* Restore the configured default interval before releasing pm. */
324 	intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
326 	intel_engine_pm_put(engine);
/*
 * Selftest wrapper: run __live_heartbeat_off on every engine that
 * supports preemption (non-preemptible engines are skipped).
 * Skipped entirely when the heartbeat is compiled out.
 * NOTE(review): err declaration, continue/return statements elided.
 */
330 static int live_heartbeat_off(void *arg)
332 	struct intel_gt *gt = arg;
333 	struct intel_engine_cs *engine;
334 	enum intel_engine_id id;
337 	/* Check that we can turn off heartbeat and not interrupt VIP */
338 	if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
341 	for_each_engine(engine, gt, id) {
342 		if (!intel_engine_has_preemption(engine))
345 		err = __live_heartbeat_off(engine);
/*
 * Entry point: register and run the heartbeat live selftests on the GT.
 * Hangcheck is force-enabled (INT_MAX) for the duration and restored
 * afterwards; a wedged GT skips the tests.
 * NOTE(review): the saved_hangcheck declaration, skip-return and final
 * return of err are elided from this extraction.
 */
353 int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
355 	static const struct i915_subtest tests[] = {
356 		SUBTEST(live_idle_flush),
357 		SUBTEST(live_idle_pulse),
358 		SUBTEST(live_heartbeat_fast),
359 		SUBTEST(live_heartbeat_off),
364 	if (intel_gt_is_wedged(&i915->gt))
367 	saved_hangcheck = i915_modparams.enable_hangcheck;
368 	i915_modparams.enable_hangcheck = INT_MAX;
370 	err = intel_gt_live_subtests(tests, &i915->gt);
372 	i915_modparams.enable_hangcheck = saved_hangcheck;