/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
9 #include "intel_engine.h"
10 #include "intel_engine_pm.h"
11 #include "intel_engine_pool.h"
13 #include "intel_gt_pm.h"
15 static int __engine_unpark(struct intel_wakeref *wf)
17 struct intel_engine_cs *engine =
18 container_of(wf, typeof(*engine), wakeref);
21 GEM_TRACE("%s\n", engine->name);
23 intel_gt_pm_get(engine->gt);
25 /* Pin the default state for fast resets from atomic context. */
27 if (engine->default_state)
28 map = i915_gem_object_pin_map(engine->default_state,
30 if (!IS_ERR_OR_NULL(map))
31 engine->pinned_default_state = map;
34 engine->unpark(engine);
36 intel_engine_init_hangcheck(engine);
40 static bool switch_to_kernel_context(struct intel_engine_cs *engine)
42 struct i915_request *rq;
44 /* Already inside the kernel context, safe to power down. */
45 if (engine->wakeref_serial == engine->serial)
48 /* GPU is pointing to the void, as good as in the kernel context. */
49 if (intel_gt_is_wedged(engine->gt))
53 * Note, we do this without taking the timeline->mutex. We cannot
54 * as we may be called while retiring the kernel context and so
55 * already underneath the timeline->mutex. Instead we rely on the
56 * exclusive property of the __engine_park that prevents anyone
57 * else from creating a request on this engine. This also requires
58 * that the ring is empty and we avoid any waits while constructing
59 * the context, as they assume protection by the timeline->mutex.
60 * This should hold true as we can only park the engine after
61 * retiring the last request, thus all rings should be empty and
64 rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
66 /* Context switch failed, hope for the best! Maybe reset? */
69 /* Check again on the next retirement. */
70 engine->wakeref_serial = engine->serial + 1;
72 i915_request_add_active_barriers(rq);
73 __i915_request_commit(rq);
78 static int __engine_park(struct intel_wakeref *wf)
80 struct intel_engine_cs *engine =
81 container_of(wf, typeof(*engine), wakeref);
83 engine->saturated = 0;
86 * If one and only one request is completed between pm events,
87 * we know that we are inside the kernel context and it is
88 * safe to power down. (We are paranoid in case that runtime
89 * suspend causes corruption to the active context image, and
90 * want to avoid that impacting userspace.)
92 if (!switch_to_kernel_context(engine))
95 GEM_TRACE("%s\n", engine->name);
97 intel_engine_disarm_breadcrumbs(engine);
98 intel_engine_pool_park(&engine->pool);
100 /* Must be reset upon idling, or we may miss the busy wakeup. */
101 GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
104 engine->park(engine);
106 if (engine->pinned_default_state) {
107 i915_gem_object_unpin_map(engine->default_state);
108 engine->pinned_default_state = NULL;
111 engine->execlists.no_priolist = false;
113 intel_gt_pm_put(engine->gt);
117 static const struct intel_wakeref_ops wf_ops = {
118 .get = __engine_unpark,
119 .put = __engine_park,
122 void intel_engine_init__pm(struct intel_engine_cs *engine)
124 struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
126 intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
129 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
130 #include "selftest_engine_pm.c"