/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"
13 static void call_idle_barriers(struct intel_engine_cs *engine)
15 struct llist_node *node, *next;
17 llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
18 struct i915_active_request *active =
19 container_of((struct list_head *)node,
20 typeof(*active), link);
22 INIT_LIST_HEAD(&active->link);
23 RCU_INIT_POINTER(active->request, NULL);
25 active->retire(active, NULL);
29 static void i915_gem_park(struct drm_i915_private *i915)
31 struct intel_engine_cs *engine;
32 enum intel_engine_id id;
34 lockdep_assert_held(&i915->drm.struct_mutex);
36 for_each_engine(engine, i915, id) {
37 call_idle_barriers(engine); /* cleanup after wedging */
38 i915_gem_batch_pool_fini(&engine->batch_pool);
41 intel_timelines_park(i915);
42 i915_vma_parked(i915);
47 static void idle_work_handler(struct work_struct *work)
49 struct drm_i915_private *i915 =
50 container_of(work, typeof(*i915), gem.idle_work);
53 cancel_delayed_work_sync(&i915->gem.retire_work);
54 mutex_lock(&i915->drm.struct_mutex);
56 intel_wakeref_lock(&i915->gt.wakeref);
57 park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
58 intel_wakeref_unlock(&i915->gt.wakeref);
62 queue_delayed_work(i915->wq,
63 &i915->gem.retire_work,
64 round_jiffies_up_relative(HZ));
66 mutex_unlock(&i915->drm.struct_mutex);
69 static void retire_work_handler(struct work_struct *work)
71 struct drm_i915_private *i915 =
72 container_of(work, typeof(*i915), gem.retire_work.work);
74 /* Come back later if the device is busy... */
75 if (mutex_trylock(&i915->drm.struct_mutex)) {
76 i915_retire_requests(i915);
77 mutex_unlock(&i915->drm.struct_mutex);
80 queue_delayed_work(i915->wq,
81 &i915->gem.retire_work,
82 round_jiffies_up_relative(HZ));
85 static int pm_notifier(struct notifier_block *nb,
89 struct drm_i915_private *i915 =
90 container_of(nb, typeof(*i915), gem.pm_notifier);
94 i915_globals_unpark();
95 queue_delayed_work(i915->wq,
96 &i915->gem.retire_work,
97 round_jiffies_up_relative(HZ));
101 queue_work(i915->wq, &i915->gem.idle_work);
108 static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
110 bool result = !i915_terminally_wedged(i915);
113 if (i915_gem_wait_for_idle(i915,
115 I915_WAIT_FOR_IDLE_BOOST,
116 I915_GEM_IDLE_TIMEOUT) == -ETIME) {
117 /* XXX hide warning from gem_eio */
118 if (i915_modparams.reset) {
119 dev_err(i915->drm.dev,
120 "Failed to idle engines, declaring wedged!\n");
125 * Forcibly cancel outstanding work and leave
128 i915_gem_set_wedged(i915);
131 } while (i915_retire_requests(i915) && result);
133 GEM_BUG_ON(i915->gt.awake);
137 bool i915_gem_load_power_context(struct drm_i915_private *i915)
139 return switch_to_kernel_context_sync(i915);
142 void i915_gem_suspend(struct drm_i915_private *i915)
146 intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
147 flush_workqueue(i915->wq);
149 mutex_lock(&i915->drm.struct_mutex);
152 * We have to flush all the executing contexts to main memory so
153 * that they can saved in the hibernation image. To ensure the last
154 * context image is coherent, we have to switch away from it. That
155 * leaves the i915->kernel_context still active when
156 * we actually suspend, and its image in memory may not match the GPU
157 * state. Fortunately, the kernel_context is disposable and we do
158 * not rely on its state.
160 switch_to_kernel_context_sync(i915);
162 mutex_unlock(&i915->drm.struct_mutex);
165 * Assert that we successfully flushed all the work and
166 * reset the GPU back to its idle, low power state.
168 GEM_BUG_ON(i915->gt.awake);
169 flush_work(&i915->gem.idle_work);
171 cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
173 i915_gem_drain_freed_objects(i915);
175 intel_uc_suspend(i915);
178 static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
180 return list_first_entry_or_null(list,
181 struct drm_i915_gem_object,
185 void i915_gem_suspend_late(struct drm_i915_private *i915)
187 struct drm_i915_gem_object *obj;
188 struct list_head *phases[] = {
189 &i915->mm.shrink_list,
190 &i915->mm.purge_list,
196 * Neither the BIOS, ourselves or any other kernel
197 * expects the system to be in execlists mode on startup,
198 * so we need to reset the GPU back to legacy mode. And the only
199 * known way to disable logical contexts is through a GPU reset.
201 * So in order to leave the system in a known default configuration,
202 * always reset the GPU upon unload and suspend. Afterwards we then
203 * clean up the GEM state tracking, flushing off the requests and
204 * leaving the system in a known idle state.
206 * Note that is of the upmost importance that the GPU is idle and
207 * all stray writes are flushed *before* we dismantle the backing
208 * storage for the pinned objects.
210 * However, since we are uncertain that resetting the GPU on older
211 * machines is a good idea, we don't - just in case it leaves the
212 * machine in an unusable condition.
215 spin_lock_irqsave(&i915->mm.obj_lock, flags);
216 for (phase = phases; *phase; phase++) {
219 while ((obj = first_mm_object(*phase))) {
220 list_move_tail(&obj->mm.link, &keep);
222 /* Beware the background _i915_gem_free_objects */
223 if (!kref_get_unless_zero(&obj->base.refcount))
226 spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
228 i915_gem_object_lock(obj);
229 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
230 i915_gem_object_unlock(obj);
231 i915_gem_object_put(obj);
233 spin_lock_irqsave(&i915->mm.obj_lock, flags);
236 list_splice_tail(&keep, *phase);
238 spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
240 intel_uc_sanitize(i915);
241 i915_gem_sanitize(i915);
244 void i915_gem_resume(struct drm_i915_private *i915)
248 WARN_ON(i915->gt.awake);
250 mutex_lock(&i915->drm.struct_mutex);
251 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
253 i915_gem_restore_gtt_mappings(i915);
254 i915_gem_restore_fences(i915);
257 * As we didn't flush the kernel context before suspend, we cannot
258 * guarantee that the context image is complete. So let's just reset
259 * it and start again.
261 intel_gt_resume(i915);
263 if (i915_gem_init_hw(i915))
266 intel_uc_resume(i915);
268 /* Always reload a context for powersaving. */
269 if (!i915_gem_load_power_context(i915))
273 intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
274 mutex_unlock(&i915->drm.struct_mutex);
278 if (!i915_reset_failed(i915)) {
279 dev_err(i915->drm.dev,
280 "Failed to re-initialize GPU, declaring it wedged!\n");
281 i915_gem_set_wedged(i915);
286 void i915_gem_init__pm(struct drm_i915_private *i915)
288 INIT_WORK(&i915->gem.idle_work, idle_work_handler);
289 INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
291 i915->gem.pm_notifier.notifier_call = pm_notifier;
292 blocking_notifier_chain_register(&i915->gt.pm_notifications,
293 &i915->gem.pm_notifier);