]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/i915/gt/intel_gt_pm.c
d2e80ba64d69a9671e785f4c1725ba1ccf9c0e50
[linux.git] / drivers / gpu / drm / i915 / gt / intel_gt_pm.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2019 Intel Corporation
5  */
6
7 #include "i915_drv.h"
8 #include "i915_params.h"
9 #include "intel_context.h"
10 #include "intel_engine_pm.h"
11 #include "intel_gt.h"
12 #include "intel_gt_pm.h"
13 #include "intel_gt_requests.h"
14 #include "intel_pm.h"
15 #include "intel_rc6.h"
16 #include "intel_wakeref.h"
17
18 static void pm_notify(struct intel_gt *gt, int state)
19 {
20         blocking_notifier_call_chain(&gt->pm_notifications, state, gt->i915);
21 }
22
/*
 * First-reference callback for the GT wakeref: power the GT back up and
 * restart the activities (RPS, PMU sampling, hangcheck, request handling)
 * that were quiesced while parked. Runs whenever gt->wakeref transitions
 * 0 -> 1. Always returns 0, as required by the intel_wakeref_ops contract.
 */
static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;

	GEM_TRACE("\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake); /* the cookie is released in __gt_park() */

	intel_enable_gt_powersave(i915);

	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915); /* gen6+ only: kick RPS out of idle */

	/* Resume PMU event sampling now that the GT is active again. */
	i915_pmu_gt_unparked(i915);

	intel_gt_queue_hangcheck(gt);
	intel_gt_unpark_requests(gt);

	/* Tell registered listeners the GT is awake; mirrors the PARK event. */
	pm_notify(gt, INTEL_GT_UNPARK);

	return 0;
}
59
/*
 * Last-reference callback for the GT wakeref: the mirror of __gt_unpark().
 * Notifies listeners, quiesces request handling, PMU and RPS, then drops
 * the display power reference taken on unpark. Runs when gt->wakeref
 * transitions 1 -> 0. Always returns 0 (intel_wakeref_ops contract).
 */
static int __gt_park(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	/* Atomically claim the power cookie stashed by __gt_unpark(). */
	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
	struct drm_i915_private *i915 = gt->i915;

	GEM_TRACE("\n");

	pm_notify(gt, INTEL_GT_PARK);
	intel_gt_park_requests(gt);

	i915_pmu_gt_parked(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_idle(i915); /* gen6+ only: drop to the idle frequency */

	/* Everything switched off, flush any residual interrupt just in case */
	intel_synchronize_irq(i915);

	GEM_BUG_ON(!wakeref); /* must pair with the get in __gt_unpark() */
	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	return 0;
}
83
/*
 * GT wakeref hooks: unpark on first reference, park again on last put.
 * INTEL_WAKEREF_PUT_ASYNC allows the final put (and hence __gt_park) to
 * be deferred out of the caller's context — NOTE(review): exact deferral
 * semantics live in intel_wakeref.c; confirm there before relying on it.
 */
static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
	.flags = INTEL_WAKEREF_PUT_ASYNC,
};
89
/*
 * Early (pre-hardware) pm setup: wire the GT wakeref to the park/unpark
 * callbacks above and prepare the notifier chain used by pm_notify().
 */
void intel_gt_pm_init_early(struct intel_gt *gt)
{
	intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);

	BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}
96
/* Main pm setup, called once the device is available; currently just rc6. */
void intel_gt_pm_init(struct intel_gt *gt)
{
	/*
	 * Enabling power-management should be "self-healing". If we cannot
	 * enable a feature, simply leave it disabled with a notice to the
	 * user.
	 */
	intel_rc6_init(&gt->rc6);
}
106
107 static bool reset_engines(struct intel_gt *gt)
108 {
109         if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
110                 return false;
111
112         return __intel_gt_reset(gt, ALL_ENGINES) == 0;
113 }
114
115 /**
116  * intel_gt_sanitize: called after the GPU has lost power
117  * @gt: the i915 GT container
118  * @force: ignore a failed reset and sanitize engine state anyway
119  *
120  * Anytime we reset the GPU, either with an explicit GPU reset or through a
121  * PCI power cycle, the GPU loses state and we must reset our state tracking
122  * to match. Note that calling intel_gt_sanitize() if the GPU has not
123  * been reset results in much confusion!
124  */
125 void intel_gt_sanitize(struct intel_gt *gt, bool force)
126 {
127         struct intel_engine_cs *engine;
128         enum intel_engine_id id;
129
130         GEM_TRACE("\n");
131
132         intel_uc_sanitize(&gt->uc);
133
134         if (!reset_engines(gt) && !force)
135                 return;
136
137         for_each_engine(engine, gt->i915, id)
138                 __intel_engine_reset(engine, false);
139 }
140
141 void intel_gt_pm_disable(struct intel_gt *gt)
142 {
143         if (!is_mock_gt(gt))
144                 intel_sanitize_gt_powersave(gt->i915);
145 }
146
/* Release the rc6 resources acquired in intel_gt_pm_init(). */
void intel_gt_pm_fini(struct intel_gt *gt)
{
	intel_rc6_fini(&gt->rc6);
}
151
/*
 * intel_gt_resume - bring the GT back up after suspend or power loss.
 *
 * Re-sanitizes rc6, resets each engine's pinned kernel context and calls
 * the per-engine resume hook, all under an explicit GT pm + forcewake
 * bracket. Stops at the first engine that fails to restart.
 *
 * Returns 0 on success, or the first engine->resume() error code.
 */
int intel_gt_resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	intel_gt_pm_get(gt);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);

	for_each_engine(engine, gt->i915, id) {
		struct intel_context *ce;

		intel_engine_pm_get(engine);

		ce = engine->kernel_context;
		if (ce) {
			GEM_BUG_ON(!intel_context_is_pinned(ce));
			/*
			 * Lockdep-only annotation: pretend we hold the pin
			 * mutex around the reset (the context stayed pinned
			 * over suspend, so the real lock is not taken here).
			 */
			mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
			ce->ops->reset(ce);
			mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
		}

		engine->serial++; /* kernel context lost */
		err = engine->resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			dev_err(gt->i915->drm.dev,
				"Failed to restart %s (%d)\n",
				engine->name, err);
			break;
		}
	}

	intel_rc6_enable(&gt->rc6);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);

	return err;
}
199
200 static void wait_for_idle(struct intel_gt *gt)
201 {
202         if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
203                 /*
204                  * Forcibly cancel outstanding work and leave
205                  * the gpu quiet.
206                  */
207                 intel_gt_set_wedged(gt);
208         }
209
210         intel_gt_pm_wait_for_idle(gt);
211 }
212
/*
 * intel_gt_suspend - quiesce the GT ahead of suspend: wait (or wedge)
 * until idle, then disable rc6 while holding a temporary runtime-pm
 * wakeref so the hardware is awake for the register writes.
 */
void intel_gt_suspend(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	/* We expect to be idle already; but also want to be independent */
	wait_for_idle(gt);

	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		intel_rc6_disable(&gt->rc6);
}
223
/* Runtime suspend: forward to the uc (microcontroller) layer. */
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_uc_runtime_suspend(&gt->uc);
}
228
/*
 * Runtime resume: reprogram swizzling (register state presumably lost
 * across the power cycle — confirm against intel_gt_init_swizzling),
 * then resume the uc layer. Returns the uc resume status.
 */
int intel_gt_runtime_resume(struct intel_gt *gt)
{
	intel_gt_init_swizzling(gt);

	return intel_uc_runtime_resume(&gt->uc);
}
235
236 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
237 #include "selftest_gt_pm.c"
238 #endif