drivers/gpu/drm/i915/intel_runtime_pm.c
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * whereas on the GT side a lot of the power management is done by the hardware.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains, there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
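
/*
 * A minimal usage sketch (illustrative only, not part of the original file):
 * code that needs a display block powered grabs a reference on the matching
 * abstract power domain, does its register access and then drops the
 * reference again, e.g. for the AUX A channel:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
 *	... read/write the AUX CH registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A);
 *
 * Which actual power well(s) back POWER_DOMAIN_AUX_A is platform specific
 * and comes from the power well tables in this file.
 */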
51
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53                                     int power_well_id);
54
55 static struct i915_power_well *
56 lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
57
58 const char *
59 intel_display_power_domain_str(enum intel_display_power_domain domain)
60 {
61         switch (domain) {
62         case POWER_DOMAIN_PIPE_A:
63                 return "PIPE_A";
64         case POWER_DOMAIN_PIPE_B:
65                 return "PIPE_B";
66         case POWER_DOMAIN_PIPE_C:
67                 return "PIPE_C";
68         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
69                 return "PIPE_A_PANEL_FITTER";
70         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
71                 return "PIPE_B_PANEL_FITTER";
72         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
73                 return "PIPE_C_PANEL_FITTER";
74         case POWER_DOMAIN_TRANSCODER_A:
75                 return "TRANSCODER_A";
76         case POWER_DOMAIN_TRANSCODER_B:
77                 return "TRANSCODER_B";
78         case POWER_DOMAIN_TRANSCODER_C:
79                 return "TRANSCODER_C";
80         case POWER_DOMAIN_TRANSCODER_EDP:
81                 return "TRANSCODER_EDP";
82         case POWER_DOMAIN_TRANSCODER_DSI_A:
83                 return "TRANSCODER_DSI_A";
84         case POWER_DOMAIN_TRANSCODER_DSI_C:
85                 return "TRANSCODER_DSI_C";
86         case POWER_DOMAIN_PORT_DDI_A_LANES:
87                 return "PORT_DDI_A_LANES";
88         case POWER_DOMAIN_PORT_DDI_B_LANES:
89                 return "PORT_DDI_B_LANES";
90         case POWER_DOMAIN_PORT_DDI_C_LANES:
91                 return "PORT_DDI_C_LANES";
92         case POWER_DOMAIN_PORT_DDI_D_LANES:
93                 return "PORT_DDI_D_LANES";
94         case POWER_DOMAIN_PORT_DDI_E_LANES:
95                 return "PORT_DDI_E_LANES";
96         case POWER_DOMAIN_PORT_DDI_A_IO:
97                 return "PORT_DDI_A_IO";
98         case POWER_DOMAIN_PORT_DDI_B_IO:
99                 return "PORT_DDI_B_IO";
100         case POWER_DOMAIN_PORT_DDI_C_IO:
101                 return "PORT_DDI_C_IO";
102         case POWER_DOMAIN_PORT_DDI_D_IO:
103                 return "PORT_DDI_D_IO";
104         case POWER_DOMAIN_PORT_DDI_E_IO:
105                 return "PORT_DDI_E_IO";
106         case POWER_DOMAIN_PORT_DSI:
107                 return "PORT_DSI";
108         case POWER_DOMAIN_PORT_CRT:
109                 return "PORT_CRT";
110         case POWER_DOMAIN_PORT_OTHER:
111                 return "PORT_OTHER";
112         case POWER_DOMAIN_VGA:
113                 return "VGA";
114         case POWER_DOMAIN_AUDIO:
115                 return "AUDIO";
116         case POWER_DOMAIN_PLLS:
117                 return "PLLS";
118         case POWER_DOMAIN_AUX_A:
119                 return "AUX_A";
120         case POWER_DOMAIN_AUX_B:
121                 return "AUX_B";
122         case POWER_DOMAIN_AUX_C:
123                 return "AUX_C";
124         case POWER_DOMAIN_AUX_D:
125                 return "AUX_D";
126         case POWER_DOMAIN_GMBUS:
127                 return "GMBUS";
128         case POWER_DOMAIN_INIT:
129                 return "INIT";
130         case POWER_DOMAIN_MODESET:
131                 return "MODESET";
132         default:
133                 MISSING_CASE(domain);
134                 return "?";
135         }
136 }
137
138 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
139                                     struct i915_power_well *power_well)
140 {
141         DRM_DEBUG_KMS("enabling %s\n", power_well->name);
142         power_well->ops->enable(dev_priv, power_well);
143         power_well->hw_enabled = true;
144 }
145
146 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
147                                      struct i915_power_well *power_well)
148 {
149         DRM_DEBUG_KMS("disabling %s\n", power_well->name);
150         power_well->hw_enabled = false;
151         power_well->ops->disable(dev_priv, power_well);
152 }
153
154 static void intel_power_well_get(struct drm_i915_private *dev_priv,
155                                  struct i915_power_well *power_well)
156 {
157         if (!power_well->count++)
158                 intel_power_well_enable(dev_priv, power_well);
159 }
160
161 static void intel_power_well_put(struct drm_i915_private *dev_priv,
162                                  struct i915_power_well *power_well)
163 {
164         WARN(!power_well->count, "Use count on power well %s is already zero",
165              power_well->name);
166
167         if (!--power_well->count)
168                 intel_power_well_disable(dev_priv, power_well);
169 }
170
171 /*
172  * We should only use the power well if we explicitly asked the hardware to
173  * enable it, so check if it's enabled and also check if we've requested it to
174  * be enabled.
175  */
176 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
177                                    struct i915_power_well *power_well)
178 {
179         return I915_READ(HSW_PWR_WELL_DRIVER) ==
180                      (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
181 }
182
183 /**
184  * __intel_display_power_is_enabled - unlocked check for a power domain
185  * @dev_priv: i915 device instance
186  * @domain: power domain to check
187  *
188  * This is the unlocked version of intel_display_power_is_enabled() and should
189  * only be used from error capture and recovery code where deadlocks are
190  * possible.
191  *
192  * Returns:
193  * True when the power domain is enabled, false otherwise.
194  */
195 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
196                                       enum intel_display_power_domain domain)
197 {
198         struct i915_power_well *power_well;
199         bool is_enabled;
200
201         if (dev_priv->pm.suspended)
202                 return false;
203
204         is_enabled = true;
205
206         for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
207                 if (power_well->always_on)
208                         continue;
209
210                 if (!power_well->hw_enabled) {
211                         is_enabled = false;
212                         break;
213                 }
214         }
215
216         return is_enabled;
217 }
218
219 /**
220  * intel_display_power_is_enabled - check for a power domain
221  * @dev_priv: i915 device instance
222  * @domain: power domain to check
223  *
224  * This function can be used to check the hw power domain state. It is mostly
225  * used in hardware state readout functions. Everywhere else code should rely
226  * upon explicit power domain reference counting to ensure that the hardware
227  * block is powered up before accessing it.
228  *
229  * Callers must hold the relevant modesetting locks to ensure that concurrent
230  * threads can't disable the power well while the caller tries to read a few
231  * registers.
232  *
233  * Returns:
234  * True when the power domain is enabled, false otherwise.
235  */
236 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
237                                     enum intel_display_power_domain domain)
238 {
239         struct i915_power_domains *power_domains;
240         bool ret;
241
242         power_domains = &dev_priv->power_domains;
243
244         mutex_lock(&power_domains->lock);
245         ret = __intel_display_power_is_enabled(dev_priv, domain);
246         mutex_unlock(&power_domains->lock);
247
248         return ret;
249 }
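
/*
 * Illustrative readout pattern for the check above (hypothetical caller, not
 * from the original file): with the relevant modeset locks already held, the
 * domain state is queried first and the registers are only touched when the
 * domain is reported as enabled:
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		val = I915_READ(PIPECONF(PIPE_A));
 *
 * Outside of HW state readout, explicit intel_display_power_get()/put()
 * references must be used instead of this check.
 */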
250
251 /**
252  * intel_display_set_init_power - set the initial power domain state
253  * @dev_priv: i915 device instance
254  * @enable: whether to enable or disable the initial power domain state
255  *
256  * For simplicity our driver load/unload and system suspend/resume code assumes
257  * that all power domains are always enabled. This function controls the state
258  * of this little hack. While the initial power domain state is enabled, runtime
259  * pm is effectively disabled.
260  */
261 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
262                                   bool enable)
263 {
264         if (dev_priv->power_domains.init_power_on == enable)
265                 return;
266
267         if (enable)
268                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
269         else
270                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
271
272         dev_priv->power_domains.init_power_on = enable;
273 }
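
/*
 * Sketch of the intended call pattern (informal, based on the comment above):
 * load/resume code turns the init power state on before touching the hardware
 * and drops it again once the display state has been taken over, at which
 * point runtime pm can really power things down:
 *
 *	intel_display_set_init_power(dev_priv, true);
 *	... bring up / read out the display hardware ...
 *	intel_display_set_init_power(dev_priv, false);
 */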
274
275 /*
276  * Starting with Haswell, we have a "Power Down Well" that can be turned off
277  * when not needed anymore. We have 4 registers that can request the power well
278  * to be enabled, and it will only be disabled if none of the registers is
279  * requesting it to be enabled.
280  */
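/*
 * For reference, the four request registers used elsewhere in this file are
 * HSW_PWR_WELL_BIOS, HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_KVMR and
 * HSW_PWR_WELL_DEBUG. The driver normally only owns the DRIVER register and
 * takes over any request bit left set in the BIOS register (see the
 * _sync_hw() hooks below).
 */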
281 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
282 {
283         struct pci_dev *pdev = dev_priv->drm.pdev;
284
285         /*
286          * After we re-enable the power well, if we touch VGA register 0x3d5
287          * we'll get unclaimed register interrupts. This stops after we write
288          * anything to the VGA MSR register. The vgacon module uses this
289          * register all the time, so if we unbind our driver and, as a
290          * consequence, bind vgacon, we'll get stuck in an infinite loop at
291          * console_unlock(). So make here we touch the VGA MSR register, making
292          * console_unlock(). So here we touch the VGA MSR register, making
293          * and error messages.
294          */
295         vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
296         outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
297         vga_put(pdev, VGA_RSRC_LEGACY_IO);
298
299         if (IS_BROADWELL(dev_priv))
300                 gen8_irq_power_well_post_enable(dev_priv,
301                                                 1 << PIPE_C | 1 << PIPE_B);
302 }
303
304 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
305 {
306         if (IS_BROADWELL(dev_priv))
307                 gen8_irq_power_well_pre_disable(dev_priv,
308                                                 1 << PIPE_C | 1 << PIPE_B);
309 }
310
311 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
312                                        struct i915_power_well *power_well)
313 {
314         struct pci_dev *pdev = dev_priv->drm.pdev;
315
316         /*
317          * After we re-enable the power well, if we touch VGA register 0x3d5
318          * we'll get unclaimed register interrupts. This stops after we write
319          * anything to the VGA MSR register. The vgacon module uses this
320          * register all the time, so if we unbind our driver and, as a
321          * consequence, bind vgacon, we'll get stuck in an infinite loop at
322          * console_unlock(). So here we touch the VGA MSR register, making
323          * sure vgacon can keep working normally without triggering interrupts
324          * and error messages.
325          */
326         if (power_well->id == SKL_DISP_PW_2) {
327                 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
328                 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
329                 vga_put(pdev, VGA_RSRC_LEGACY_IO);
330
331                 gen8_irq_power_well_post_enable(dev_priv,
332                                                 1 << PIPE_C | 1 << PIPE_B);
333         }
334 }
335
336 static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
337                                        struct i915_power_well *power_well)
338 {
339         if (power_well->id == SKL_DISP_PW_2)
340                 gen8_irq_power_well_pre_disable(dev_priv,
341                                                 1 << PIPE_C | 1 << PIPE_B);
342 }
343
344 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
345                                struct i915_power_well *power_well, bool enable)
346 {
347         bool is_enabled, enable_requested;
348         uint32_t tmp;
349
350         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
351         is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
352         enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
353
354         if (enable) {
355                 if (!enable_requested)
356                         I915_WRITE(HSW_PWR_WELL_DRIVER,
357                                    HSW_PWR_WELL_ENABLE_REQUEST);
358
359                 if (!is_enabled) {
360                         DRM_DEBUG_KMS("Enabling power well\n");
361                         if (intel_wait_for_register(dev_priv,
362                                                     HSW_PWR_WELL_DRIVER,
363                                                     HSW_PWR_WELL_STATE_ENABLED,
364                                                     HSW_PWR_WELL_STATE_ENABLED,
365                                                     20))
366                                 DRM_ERROR("Timeout enabling power well\n");
367                         hsw_power_well_post_enable(dev_priv);
368                 }
369
370         } else {
371                 if (enable_requested) {
372                         hsw_power_well_pre_disable(dev_priv);
373                         I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
374                         POSTING_READ(HSW_PWR_WELL_DRIVER);
375                         DRM_DEBUG_KMS("Requesting to disable the power well\n");
376                 }
377         }
378 }
379
380 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
381         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
382         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
383         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
384         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
385         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
386         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
387         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
388         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
389         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
390         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
391         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |                \
392         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
393         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
394         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
395         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
396         BIT_ULL(POWER_DOMAIN_VGA) |                             \
397         BIT_ULL(POWER_DOMAIN_INIT))
398 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (          \
399         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
400         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
401         BIT_ULL(POWER_DOMAIN_INIT))
402 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
403         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
404         BIT_ULL(POWER_DOMAIN_INIT))
405 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
406         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
407         BIT_ULL(POWER_DOMAIN_INIT))
408 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (            \
409         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
410         BIT_ULL(POWER_DOMAIN_INIT))
411 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
412         SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
413         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
414         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
415         BIT_ULL(POWER_DOMAIN_INIT))
416
417 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
418         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
419         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
420         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
421         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
422         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
423         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
424         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
425         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
426         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
427         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
428         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
429         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
430         BIT_ULL(POWER_DOMAIN_VGA) |                             \
431         BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
432         BIT_ULL(POWER_DOMAIN_INIT))
433 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (              \
434         BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
435         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
436         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
437         BIT_ULL(POWER_DOMAIN_INIT))
438 #define BXT_DPIO_CMN_A_POWER_DOMAINS (                  \
439         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
440         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
441         BIT_ULL(POWER_DOMAIN_INIT))
442 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (                 \
443         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
444         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
445         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
446         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
447         BIT_ULL(POWER_DOMAIN_INIT))
448
449 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
450         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
451         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
452         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
453         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
454         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
455         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
456         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
457         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
458         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
459         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
460         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
461         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
462         BIT_ULL(POWER_DOMAIN_VGA) |                             \
463         BIT_ULL(POWER_DOMAIN_INIT))
464 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (            \
465         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
466 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
467         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
468 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
469         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
470 #define GLK_DPIO_CMN_A_POWER_DOMAINS (                  \
471         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
472         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
473         BIT_ULL(POWER_DOMAIN_INIT))
474 #define GLK_DPIO_CMN_B_POWER_DOMAINS (                  \
475         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
476         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
477         BIT_ULL(POWER_DOMAIN_INIT))
478 #define GLK_DPIO_CMN_C_POWER_DOMAINS (                  \
479         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
480         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
481         BIT_ULL(POWER_DOMAIN_INIT))
482 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (               \
483         BIT_ULL(POWER_DOMAIN_AUX_A) |           \
484         BIT_ULL(POWER_DOMAIN_INIT))
485 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (               \
486         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
487         BIT_ULL(POWER_DOMAIN_INIT))
488 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (               \
489         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
490         BIT_ULL(POWER_DOMAIN_INIT))
491 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (              \
492         GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
493         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
494         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
495         BIT_ULL(POWER_DOMAIN_INIT))
496
497 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
498         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
499         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
500         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
501         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
502         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
503         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
504         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
505         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
506         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
507         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
508         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |                \
509         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
510         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
511         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
512         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
513         BIT_ULL(POWER_DOMAIN_VGA) |                             \
514         BIT_ULL(POWER_DOMAIN_INIT))
515 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (            \
516         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
517         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
518         BIT_ULL(POWER_DOMAIN_INIT))
519 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (            \
520         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
521         BIT_ULL(POWER_DOMAIN_INIT))
522 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (            \
523         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
524         BIT_ULL(POWER_DOMAIN_INIT))
525 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (            \
526         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
527         BIT_ULL(POWER_DOMAIN_INIT))
528 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (               \
529         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
530         BIT_ULL(POWER_DOMAIN_INIT))
531 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (               \
532         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
533         BIT_ULL(POWER_DOMAIN_INIT))
534 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (               \
535         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
536         BIT_ULL(POWER_DOMAIN_INIT))
537 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (               \
538         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
539         BIT_ULL(POWER_DOMAIN_INIT))
540 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
541         CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
542         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
543         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
544         BIT_ULL(POWER_DOMAIN_INIT))
545
546 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
547 {
548         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
549                   "DC9 already programmed to be enabled.\n");
550         WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
551                   "DC5 still not disabled to enable DC9.\n");
552         WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
553         WARN_ONCE(intel_irqs_enabled(dev_priv),
554                   "Interrupts not disabled yet.\n");
555
556          /*
557           * TODO: check for the following to verify the conditions to enter DC9
558           * state are satisfied:
559           * 1] Check relevant display engine registers to verify if mode set
560           * disable sequence was followed.
561           * 2] Check if the display uninitialize sequence has been initiated.
562           */
563 }
564
565 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
566 {
567         WARN_ONCE(intel_irqs_enabled(dev_priv),
568                   "Interrupts not disabled yet.\n");
569         WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
570                   "DC5 still not disabled.\n");
571
572          /*
573           * TODO: check for the following to verify DC9 state was indeed
574           * entered before programming to disable it:
575           * 1] Check relevant display engine registers to verify if mode
576           *  set disable sequence was followed.
577           * 2] Check if the display uninitialize sequence has been initiated.
578           */
579 }
580
581 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
582                                 u32 state)
583 {
584         int rewrites = 0;
585         int rereads = 0;
586         u32 v;
587
588         I915_WRITE(DC_STATE_EN, state);
589
590         /* It has been observed that disabling the DC6 state sometimes
591          * doesn't stick and the DMC keeps returning the old value. Re-read
592          * the register enough times to confirm the write stuck, and force a
593          * rewrite until the state is exactly what we want.
594          */
595         do  {
596                 v = I915_READ(DC_STATE_EN);
597
598                 if (v != state) {
599                         I915_WRITE(DC_STATE_EN, state);
600                         rewrites++;
601                         rereads = 0;
602                 } else if (rereads++ > 5) {
603                         break;
604                 }
605
606         } while (rewrites < 100);
607
608         if (v != state)
609                 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
610                           state, v);
611
612         /* Most of the time a single retry is enough, so avoid spamming the log */
613         if (rewrites > 1)
614                 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
615                               state, rewrites);
616 }
617
618 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
619 {
620         u32 mask;
621
622         mask = DC_STATE_EN_UPTO_DC5;
623         if (IS_GEN9_LP(dev_priv))
624                 mask |= DC_STATE_EN_DC9;
625         else
626                 mask |= DC_STATE_EN_UPTO_DC6;
627
628         return mask;
629 }
630
631 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
632 {
633         u32 val;
634
635         val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
636
637         DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
638                       dev_priv->csr.dc_state, val);
639         dev_priv->csr.dc_state = val;
640 }
641
642 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
643 {
644         uint32_t val;
645         uint32_t mask;
646
647         if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
648                 state &= dev_priv->csr.allowed_dc_mask;
649
650         val = I915_READ(DC_STATE_EN);
651         mask = gen9_dc_mask(dev_priv);
652         DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
653                       val & mask, state);
654
655         /* Check if DMC is ignoring our DC state requests */
656         if ((val & mask) != dev_priv->csr.dc_state)
657                 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
658                           dev_priv->csr.dc_state, val & mask);
659
660         val &= ~mask;
661         val |= state;
662
663         gen9_write_dc_state(dev_priv, val);
664
665         dev_priv->csr.dc_state = val & mask;
666 }
667
668 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
669 {
670         assert_can_enable_dc9(dev_priv);
671
672         DRM_DEBUG_KMS("Enabling DC9\n");
673
674         intel_power_sequencer_reset(dev_priv);
675         gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
676 }
677
678 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
679 {
680         assert_can_disable_dc9(dev_priv);
681
682         DRM_DEBUG_KMS("Disabling DC9\n");
683
684         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
685
686         intel_pps_unlock_regs_wa(dev_priv);
687 }
688
689 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
690 {
691         WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
692                   "CSR program storage start is NULL\n");
693         WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
694         WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
695 }
696
697 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
698 {
699         bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
700                                         SKL_DISP_PW_2);
701
702         WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
703
704         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
705                   "DC5 already programmed to be enabled.\n");
706         assert_rpm_wakelock_held(dev_priv);
707
708         assert_csr_loaded(dev_priv);
709 }
710
711 void gen9_enable_dc5(struct drm_i915_private *dev_priv)
712 {
713         assert_can_enable_dc5(dev_priv);
714
715         DRM_DEBUG_KMS("Enabling DC5\n");
716
717         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
718 }
719
720 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
721 {
722         WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
723                   "Backlight is not disabled.\n");
724         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
725                   "DC6 already programmed to be enabled.\n");
726
727         assert_csr_loaded(dev_priv);
728 }
729
730 void skl_enable_dc6(struct drm_i915_private *dev_priv)
731 {
732         assert_can_enable_dc6(dev_priv);
733
734         DRM_DEBUG_KMS("Enabling DC6\n");
735
736         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
737
738 }
739
740 void skl_disable_dc6(struct drm_i915_private *dev_priv)
741 {
742         DRM_DEBUG_KMS("Disabling DC6\n");
743
744         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
745 }
746
747 static void
748 gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
749                                   struct i915_power_well *power_well)
750 {
751         enum skl_disp_power_wells power_well_id = power_well->id;
752         u32 val;
753         u32 mask;
754
755         mask = SKL_POWER_WELL_REQ(power_well_id);
756
757         val = I915_READ(HSW_PWR_WELL_KVMR);
758         if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
759                       power_well->name))
760                 I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
761
762         val = I915_READ(HSW_PWR_WELL_BIOS);
763         val |= I915_READ(HSW_PWR_WELL_DEBUG);
764
765         if (!(val & mask))
766                 return;
767
768         /*
769          * DMC is known to force on the request bits for power well 1 on SKL
770          * and BXT and the misc IO power well on SKL, but we don't expect any
771          * other request bits to be set, so WARN for those.
772          */
773         if (power_well_id == SKL_DISP_PW_1 ||
774             (IS_GEN9_BC(dev_priv) &&
775              power_well_id == SKL_DISP_PW_MISC_IO))
776                 DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
777                                  "by DMC\n", power_well->name);
778         else
779                 WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
780                           power_well->name);
781
782         I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
783         I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
784 }
785
786 static void skl_set_power_well(struct drm_i915_private *dev_priv,
787                                struct i915_power_well *power_well, bool enable)
788 {
789         uint32_t tmp, fuse_status;
790         uint32_t req_mask, state_mask;
791         bool is_enabled, enable_requested, check_fuse_status = false;
792
793         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
794         fuse_status = I915_READ(SKL_FUSE_STATUS);
795
796         switch (power_well->id) {
797         case SKL_DISP_PW_1:
798                 if (intel_wait_for_register(dev_priv,
799                                             SKL_FUSE_STATUS,
800                                             SKL_FUSE_PG0_DIST_STATUS,
801                                             SKL_FUSE_PG0_DIST_STATUS,
802                                             1)) {
803                         DRM_ERROR("PG0 not enabled\n");
804                         return;
805                 }
806                 break;
807         case SKL_DISP_PW_2:
808                 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
809                         DRM_ERROR("PG1 in disabled state\n");
810                         return;
811                 }
812                 break;
813         case SKL_DISP_PW_MISC_IO:
814         case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A, CNL_DISP_PW_DDI_A */
815         case SKL_DISP_PW_DDI_B:
816         case SKL_DISP_PW_DDI_C:
817         case SKL_DISP_PW_DDI_D:
818         case GLK_DISP_PW_AUX_A: /* CNL_DISP_PW_AUX_A */
819         case GLK_DISP_PW_AUX_B: /* CNL_DISP_PW_AUX_B */
820         case GLK_DISP_PW_AUX_C: /* CNL_DISP_PW_AUX_C */
821         case CNL_DISP_PW_AUX_D:
822                 break;
823         default:
824                 WARN(1, "Unknown power well %lu\n", power_well->id);
825                 return;
826         }
827
828         req_mask = SKL_POWER_WELL_REQ(power_well->id);
829         enable_requested = tmp & req_mask;
830         state_mask = SKL_POWER_WELL_STATE(power_well->id);
831         is_enabled = tmp & state_mask;
832
833         if (!enable && enable_requested)
834                 skl_power_well_pre_disable(dev_priv, power_well);
835
836         if (enable) {
837                 if (!enable_requested) {
838                         WARN((tmp & state_mask) &&
839                                 !I915_READ(HSW_PWR_WELL_BIOS),
840                                 "Invalid for power well status to be enabled, "
841                                 "unless done by the BIOS, when request is to disable!\n");
842                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
843                 }
844
845                 if (!is_enabled) {
846                         DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
847                         check_fuse_status = true;
848                 }
849         } else {
850                 if (enable_requested) {
851                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
852                         POSTING_READ(HSW_PWR_WELL_DRIVER);
853                         DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
854                 }
855
856                 gen9_sanitize_power_well_requests(dev_priv, power_well);
857         }
858
859         if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
860                      1))
861                 DRM_ERROR("%s %s timeout\n",
862                           power_well->name, enable ? "enable" : "disable");
863
864         if (check_fuse_status) {
865                 if (power_well->id == SKL_DISP_PW_1) {
866                         if (intel_wait_for_register(dev_priv,
867                                                     SKL_FUSE_STATUS,
868                                                     SKL_FUSE_PG1_DIST_STATUS,
869                                                     SKL_FUSE_PG1_DIST_STATUS,
870                                                     1))
871                                 DRM_ERROR("PG1 distributing status timeout\n");
872                 } else if (power_well->id == SKL_DISP_PW_2) {
873                         if (intel_wait_for_register(dev_priv,
874                                                     SKL_FUSE_STATUS,
875                                                     SKL_FUSE_PG2_DIST_STATUS,
876                                                     SKL_FUSE_PG2_DIST_STATUS,
877                                                     1))
878                                 DRM_ERROR("PG2 distributing status timeout\n");
879                 }
880         }
881
882         if (enable && !is_enabled)
883                 skl_power_well_post_enable(dev_priv, power_well);
884 }
885
886 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
887                                    struct i915_power_well *power_well)
888 {
889         /* Take over the request bit if set by BIOS. */
890         if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
891                 if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
892                       HSW_PWR_WELL_ENABLE_REQUEST))
893                         I915_WRITE(HSW_PWR_WELL_DRIVER,
894                                    HSW_PWR_WELL_ENABLE_REQUEST);
895                 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
896         }
897 }
898
899 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
900                                   struct i915_power_well *power_well)
901 {
902         hsw_set_power_well(dev_priv, power_well, true);
903 }
904
905 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
906                                    struct i915_power_well *power_well)
907 {
908         hsw_set_power_well(dev_priv, power_well, false);
909 }
910
911 static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
912                                         struct i915_power_well *power_well)
913 {
914         uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
915                 SKL_POWER_WELL_STATE(power_well->id);
916
917         return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
918 }
919
920 static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
921                                 struct i915_power_well *power_well)
922 {
923         uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
924         uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);
925
926         /* Take over the request bit if set by BIOS. */
927         if (bios_req & mask) {
928                 uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);
929
930                 if (!(drv_req & mask))
931                         I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
932                 I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
933         }
934 }
935
936 static void skl_power_well_enable(struct drm_i915_private *dev_priv,
937                                 struct i915_power_well *power_well)
938 {
939         skl_set_power_well(dev_priv, power_well, true);
940 }
941
942 static void skl_power_well_disable(struct drm_i915_private *dev_priv,
943                                 struct i915_power_well *power_well)
944 {
945         skl_set_power_well(dev_priv, power_well, false);
946 }
947
948 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
949                                            struct i915_power_well *power_well)
950 {
951         bxt_ddi_phy_init(dev_priv, power_well->data);
952 }
953
954 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
955                                             struct i915_power_well *power_well)
956 {
957         bxt_ddi_phy_uninit(dev_priv, power_well->data);
958 }
959
960 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
961                                             struct i915_power_well *power_well)
962 {
963         return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
964 }
965
966 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
967 {
968         struct i915_power_well *power_well;
969
970         power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
971         if (power_well->count > 0)
972                 bxt_ddi_phy_verify_state(dev_priv, power_well->data);
973
974         power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
975         if (power_well->count > 0)
976                 bxt_ddi_phy_verify_state(dev_priv, power_well->data);
977
978         if (IS_GEMINILAKE(dev_priv)) {
979                 power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
980                 if (power_well->count > 0)
981                         bxt_ddi_phy_verify_state(dev_priv, power_well->data);
982         }
983 }
984
985 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
986                                            struct i915_power_well *power_well)
987 {
988         return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
989 }
990
991 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
992 {
993         u32 tmp = I915_READ(DBUF_CTL);
994
995         WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
996              (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
997              "Unexpected DBuf power state (0x%08x)\n", tmp);
998 }
999
1000 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1001                                           struct i915_power_well *power_well)
1002 {
1003         struct intel_cdclk_state cdclk_state = {};
1004
1005         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1006
1007         dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
1008         WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));
1009
1010         gen9_assert_dbuf_enabled(dev_priv);
1011
1012         if (IS_GEN9_LP(dev_priv))
1013                 bxt_verify_ddi_phy_power_wells(dev_priv);
1014 }
1015
1016 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1017                                            struct i915_power_well *power_well)
1018 {
1019         if (!dev_priv->csr.dmc_payload)
1020                 return;
1021
1022         if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
1023                 skl_enable_dc6(dev_priv);
1024         else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
1025                 gen9_enable_dc5(dev_priv);
1026 }
1027
1028 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1029                                          struct i915_power_well *power_well)
1030 {
1031 }
1032
1033 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1034                                            struct i915_power_well *power_well)
1035 {
1036 }
1037
1038 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1039                                              struct i915_power_well *power_well)
1040 {
1041         return true;
1042 }
1043
1044 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1045                                struct i915_power_well *power_well, bool enable)
1046 {
1047         enum punit_power_well power_well_id = power_well->id;
1048         u32 mask;
1049         u32 state;
1050         u32 ctrl;
1051
1052         mask = PUNIT_PWRGT_MASK(power_well_id);
1053         state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
1054                          PUNIT_PWRGT_PWR_GATE(power_well_id);
1055
1056         mutex_lock(&dev_priv->rps.hw_lock);
1057
1058 #define COND \
1059         ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1060
1061         if (COND)
1062                 goto out;
1063
1064         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1065         ctrl &= ~mask;
1066         ctrl |= state;
1067         vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1068
1069         if (wait_for(COND, 100))
1070                 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1071                           state,
1072                           vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1073
1074 #undef COND
1075
1076 out:
1077         mutex_unlock(&dev_priv->rps.hw_lock);
1078 }
1079
1080 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1081                                   struct i915_power_well *power_well)
1082 {
1083         vlv_set_power_well(dev_priv, power_well, true);
1084 }
1085
1086 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1087                                    struct i915_power_well *power_well)
1088 {
1089         vlv_set_power_well(dev_priv, power_well, false);
1090 }
1091
1092 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1093                                    struct i915_power_well *power_well)
1094 {
1095         int power_well_id = power_well->id;
1096         bool enabled = false;
1097         u32 mask;
1098         u32 state;
1099         u32 ctrl;
1100
1101         mask = PUNIT_PWRGT_MASK(power_well_id);
1102         ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
1103
1104         mutex_lock(&dev_priv->rps.hw_lock);
1105
1106         state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1107         /*
1108          * We only ever set the power-on and power-gate states, anything
1109          * else is unexpected.
1110          */
1111         WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
1112                 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
1113         if (state == ctrl)
1114                 enabled = true;
1115
1116         /*
1117          * A transient state at this point would mean some unexpected party
1118          * is poking at the power controls too.
1119          */
1120         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1121         WARN_ON(ctrl != state);
1122
1123         mutex_unlock(&dev_priv->rps.hw_lock);
1124
1125         return enabled;
1126 }
1127
1128 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1129 {
1130         u32 val;
1131
1132         /*
1133          * On driver load, a pipe may be active and driving a DSI display.
1134          * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1135          * (and never recovering) in this case. intel_dsi_post_disable() will
1136          * clear it when we turn off the display.
1137          */
1138         val = I915_READ(DSPCLK_GATE_D);
1139         val &= DPOUNIT_CLOCK_GATE_DISABLE;
1140         val |= VRHUNIT_CLOCK_GATE_DISABLE;
1141         I915_WRITE(DSPCLK_GATE_D, val);
1142
1143         /*
1144          * Disable trickle feed and enable pnd deadline calculation
1145          */
1146         I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1147         I915_WRITE(CBR1_VLV, 0);
1148
1149         WARN_ON(dev_priv->rawclk_freq == 0);
1150
1151         I915_WRITE(RAWCLK_FREQ_VLV,
1152                    DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1153 }
1154
1155 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1156 {
1157         struct intel_encoder *encoder;
1158         enum pipe pipe;
1159
1160         /*
1161          * Enable the CRI clock source so we can get at the
1162          * display and the reference clock for VGA
1163          * hotplug / manual detection. Supposedly DSI also
1164          * needs the ref clock up and running.
1165          *
1166          * CHV DPLL B/C have some issues if VGA mode is enabled.
1167          */
1168         for_each_pipe(dev_priv, pipe) {
1169                 u32 val = I915_READ(DPLL(pipe));
1170
1171                 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1172                 if (pipe != PIPE_A)
1173                         val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1174
1175                 I915_WRITE(DPLL(pipe), val);
1176         }
1177
1178         vlv_init_display_clock_gating(dev_priv);
1179
1180         spin_lock_irq(&dev_priv->irq_lock);
1181         valleyview_enable_display_irqs(dev_priv);
1182         spin_unlock_irq(&dev_priv->irq_lock);
1183
1184         /*
1185          * During driver initialization/resume we can avoid restoring the
1186          * part of the HW/SW state that will be initialized explicitly anyway.
1187          */
1188         if (dev_priv->power_domains.initializing)
1189                 return;
1190
1191         intel_hpd_init(dev_priv);
1192
1193         /* Re-enable the ADPA, if we have one */
1194         for_each_intel_encoder(&dev_priv->drm, encoder) {
1195                 if (encoder->type == INTEL_OUTPUT_ANALOG)
1196                         intel_crt_reset(&encoder->base);
1197         }
1198
1199         i915_redisable_vga_power_on(dev_priv);
1200
1201         intel_pps_unlock_regs_wa(dev_priv);
1202 }
1203
1204 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1205 {
1206         spin_lock_irq(&dev_priv->irq_lock);
1207         valleyview_disable_display_irqs(dev_priv);
1208         spin_unlock_irq(&dev_priv->irq_lock);
1209
1210         /* make sure we're done processing display irqs */
1211         synchronize_irq(dev_priv->drm.irq);
1212
1213         intel_power_sequencer_reset(dev_priv);
1214
1215         /* Prevent us from re-enabling polling by accident in late suspend */
1216         if (!dev_priv->drm.dev->power.is_suspended)
1217                 intel_hpd_poll_init(dev_priv);
1218 }
1219
1220 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1221                                           struct i915_power_well *power_well)
1222 {
1223         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
1224
1225         vlv_set_power_well(dev_priv, power_well, true);
1226
1227         vlv_display_power_well_init(dev_priv);
1228 }
1229
1230 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1231                                            struct i915_power_well *power_well)
1232 {
1233         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
1234
1235         vlv_display_power_well_deinit(dev_priv);
1236
1237         vlv_set_power_well(dev_priv, power_well, false);
1238 }
1239
1240 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1241                                            struct i915_power_well *power_well)
1242 {
1243         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1244
1245         /* since ref/cri clock was enabled */
1246         udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1247
1248         vlv_set_power_well(dev_priv, power_well, true);
1249
1250         /*
1251          * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1252          *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
1253          *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
1254          *   b. The other bits such as sfr settings / modesel may all
1255          *      be set to 0.
1256          *
1257          * This should only be done on init and resume from S3 with
1258          * both PLLs disabled, or we risk losing DPIO and PLL
1259          * synchronization.
1260          */
1261         I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1262 }
1263
1264 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1265                                             struct i915_power_well *power_well)
1266 {
1267         enum pipe pipe;
1268
1269         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1270
1271         for_each_pipe(dev_priv, pipe)
1272                 assert_pll_disabled(dev_priv, pipe);
1273
1274         /* Assert common reset */
1275         I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1276
1277         vlv_set_power_well(dev_priv, power_well, false);
1278 }
1279
1280 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1281
1282 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1283                                                  int power_well_id)
1284 {
1285         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1286         int i;
1287
1288         for (i = 0; i < power_domains->power_well_count; i++) {
1289                 struct i915_power_well *power_well;
1290
1291                 power_well = &power_domains->power_wells[i];
1292                 if (power_well->id == power_well_id)
1293                         return power_well;
1294         }
1295
1296         return NULL;
1297 }
1298
1299 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1300
1301 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1302 {
1303         struct i915_power_well *cmn_bc =
1304                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1305         struct i915_power_well *cmn_d =
1306                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
1307         u32 phy_control = dev_priv->chv_phy_control;
1308         u32 phy_status = 0;
1309         u32 phy_status_mask = 0xffffffff;
1310
1311         /*
1312          * The BIOS can leave the PHY in some weird state
1313          * where it doesn't fully power down some parts.
1314          * Disable the asserts until the PHY has been fully
1315          * reset (ie. the power well has been disabled at
1316          * least once).
1317          */
1318         if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1319                 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1320                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1321                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1322                                      PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1323                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1324                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1325
1326         if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1327                 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1328                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1329                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1330
1331         if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1332                 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1333
1334                 /* this assumes override is only used to enable lanes */
1335                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1336                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1337
1338                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1339                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1340
1341                 /* CL1 is on whenever anything is on in either channel */
1342                 if (BITS_SET(phy_control,
1343                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1344                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1345                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1346
1347                 /*
1348                  * The DPLLB check accounts for the pipe B + port A usage
1349                  * with CL2 powered up but all the lanes in the second channel
1350                  * powered down.
1351                  */
1352                 if (BITS_SET(phy_control,
1353                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1354                     (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1355                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1356
1357                 if (BITS_SET(phy_control,
1358                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1359                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1360                 if (BITS_SET(phy_control,
1361                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1362                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1363
1364                 if (BITS_SET(phy_control,
1365                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1366                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1367                 if (BITS_SET(phy_control,
1368                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1369                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1370         }
1371
1372         if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
1373                 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1374
1375                 /* this assumes override is only used to enable lanes */
1376                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1377                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1378
1379                 if (BITS_SET(phy_control,
1380                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1381                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1382
1383                 if (BITS_SET(phy_control,
1384                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1385                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1386                 if (BITS_SET(phy_control,
1387                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1388                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1389         }
1390
1391         phy_status &= phy_status_mask;
1392
1393         /*
1394          * The PHY may be busy with some initial calibration and whatnot,
1395          * so the power state can take a while to actually change.
1396          */
1397         if (intel_wait_for_register(dev_priv,
1398                                     DISPLAY_PHY_STATUS,
1399                                     phy_status_mask,
1400                                     phy_status,
1401                                     10))
1402                 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1403                           I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1404                           phy_status, dev_priv->chv_phy_control);
1405 }
1406
1407 #undef BITS_SET
1408
1409 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1410                                            struct i915_power_well *power_well)
1411 {
1412         enum dpio_phy phy;
1413         enum pipe pipe;
1414         uint32_t tmp;
1415
1416         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1417                      power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1418
1419         if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1420                 pipe = PIPE_A;
1421                 phy = DPIO_PHY0;
1422         } else {
1423                 pipe = PIPE_C;
1424                 phy = DPIO_PHY1;
1425         }
1426
1427         /* since ref/cri clock was enabled */
1428         udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1429         vlv_set_power_well(dev_priv, power_well, true);
1430
1431         /* Poll for phypwrgood signal */
1432         if (intel_wait_for_register(dev_priv,
1433                                     DISPLAY_PHY_STATUS,
1434                                     PHY_POWERGOOD(phy),
1435                                     PHY_POWERGOOD(phy),
1436                                     1))
1437                 DRM_ERROR("Display PHY %d is not powered up\n", phy);
1438
1439         mutex_lock(&dev_priv->sb_lock);
1440
1441         /* Enable dynamic power down */
1442         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1443         tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1444                 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1445         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1446
1447         if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1448                 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1449                 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1450                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1451         } else {
1452                 /*
1453                  * Force the non-existing CL2 off. BXT does this
1454                  * too, so maybe it saves some power even though
1455                  * CL2 doesn't exist?
1456                  */
1457                 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1458                 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1459                 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1460         }
1461
1462         mutex_unlock(&dev_priv->sb_lock);
1463
1464         dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1465         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1466
1467         DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1468                       phy, dev_priv->chv_phy_control);
1469
1470         assert_chv_phy_status(dev_priv);
1471 }
1472
1473 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1474                                             struct i915_power_well *power_well)
1475 {
1476         enum dpio_phy phy;
1477
1478         WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1479                      power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1480
1481         if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1482                 phy = DPIO_PHY0;
1483                 assert_pll_disabled(dev_priv, PIPE_A);
1484                 assert_pll_disabled(dev_priv, PIPE_B);
1485         } else {
1486                 phy = DPIO_PHY1;
1487                 assert_pll_disabled(dev_priv, PIPE_C);
1488         }
1489
1490         dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1491         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1492
1493         vlv_set_power_well(dev_priv, power_well, false);
1494
1495         DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1496                       phy, dev_priv->chv_phy_control);
1497
1498         /* PHY is fully reset now, so we can enable the PHY state asserts */
1499         dev_priv->chv_phy_assert[phy] = true;
1500
1501         assert_chv_phy_status(dev_priv);
1502 }
1503
1504 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1505                                      enum dpio_channel ch, bool override, unsigned int mask)
1506 {
1507         enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1508         u32 reg, val, expected, actual;
1509
1510         /*
1511          * The BIOS can leave the PHY in some weird state
1512          * where it doesn't fully power down some parts.
1513          * Disable the asserts until the PHY has been fully
1514          * reset (i.e. the power well has been disabled at
1515          * least once).
1516          */
1517         if (!dev_priv->chv_phy_assert[phy])
1518                 return;
1519
1520         if (ch == DPIO_CH0)
1521                 reg = _CHV_CMN_DW0_CH0;
1522         else
1523                 reg = _CHV_CMN_DW6_CH1;
1524
1525         mutex_lock(&dev_priv->sb_lock);
1526         val = vlv_dpio_read(dev_priv, pipe, reg);
1527         mutex_unlock(&dev_priv->sb_lock);
1528
1529         /*
1530          * This assumes !override is only used when the port is disabled.
1531          * All lanes should power down even without the override when
1532          * the port is disabled.
1533          */
1534         if (!override || mask == 0xf) {
1535                 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1536                 /*
1537                  * If CH1 common lane is not active anymore
1538                  * (eg. for pipe B DPLL) the entire channel will
1539                  * shut down, which causes the common lane registers
1540                  * to read as 0. That means we can't actually check
1541                  * the lane power down status bits, but as the entire
1542                  * register reads as 0 it's a good indication that the
1543                  * channel is indeed entirely powered down.
1544                  */
1545                 if (ch == DPIO_CH1 && val == 0)
1546                         expected = 0;
1547         } else if (mask != 0x0) {
1548                 expected = DPIO_ANYDL_POWERDOWN;
1549         } else {
1550                 expected = 0;
1551         }
1552
1553         if (ch == DPIO_CH0)
1554                 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1555         else
1556                 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1557         actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1558
1559         WARN(actual != expected,
1560              "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1561              !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1562              !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1563              reg, val);
1564 }
1565
1566 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1567                           enum dpio_channel ch, bool override)
1568 {
1569         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1570         bool was_override;
1571
1572         mutex_lock(&power_domains->lock);
1573
1574         was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1575
1576         if (override == was_override)
1577                 goto out;
1578
1579         if (override)
1580                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1581         else
1582                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1583
1584         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1585
1586         DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1587                       phy, ch, dev_priv->chv_phy_control);
1588
1589         assert_chv_phy_status(dev_priv);
1590
1591 out:
1592         mutex_unlock(&power_domains->lock);
1593
1594         return was_override;
1595 }
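
/*
 * Illustrative sketch, not part of the driver: chv_phy_powergate_ch() returns
 * the previous override state, so a hypothetical caller can force the
 * override on temporarily and then restore whatever was there before. The
 * PHY/channel pair below is chosen arbitrarily for the example.
 */
static void __maybe_unused
example_chv_phy_override(struct drm_i915_private *dev_priv)
{
        bool was_override;

        /* Force the override on, remembering the previous setting. */
        was_override = chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, true);

        /* ... work that relies on the override being enabled ... */

        /* Restore the original override state. */
        chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, was_override);
}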
1596
1597 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1598                              bool override, unsigned int mask)
1599 {
1600         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1601         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1602         enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1603         enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1604
1605         mutex_lock(&power_domains->lock);
1606
1607         dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1608         dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1609
1610         if (override)
1611                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1612         else
1613                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1614
1615         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1616
1617         DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1618                       phy, ch, mask, dev_priv->chv_phy_control);
1619
1620         assert_chv_phy_status(dev_priv);
1621
1622         assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1623
1624         mutex_unlock(&power_domains->lock);
1625 }
1626
1627 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1628                                         struct i915_power_well *power_well)
1629 {
1630         enum pipe pipe = power_well->id;
1631         bool enabled;
1632         u32 state, ctrl;
1633
1634         mutex_lock(&dev_priv->rps.hw_lock);
1635
1636         state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1637         /*
1638          * We only ever set the power-on and power-gate states, anything
1639          * else is unexpected.
1640          */
1641         WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1642         enabled = state == DP_SSS_PWR_ON(pipe);
1643
1644         /*
1645          * A transient state at this point would mean some unexpected party
1646          * is poking at the power controls too.
1647          */
1648         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1649         WARN_ON(ctrl << 16 != state);
1650
1651         mutex_unlock(&dev_priv->rps.hw_lock);
1652
1653         return enabled;
1654 }
1655
1656 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1657                                     struct i915_power_well *power_well,
1658                                     bool enable)
1659 {
1660         enum pipe pipe = power_well->id;
1661         u32 state;
1662         u32 ctrl;
1663
1664         state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1665
1666         mutex_lock(&dev_priv->rps.hw_lock);
1667
1668 #define COND \
1669         ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1670
1671         if (COND)
1672                 goto out;
1673
1674         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1675         ctrl &= ~DP_SSC_MASK(pipe);
1676         ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1677         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1678
1679         if (wait_for(COND, 100))
1680                 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1681                           state,
1682                           vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1683
1684 #undef COND
1685
1686 out:
1687         mutex_unlock(&dev_priv->rps.hw_lock);
1688 }
1689
1690 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1691                                        struct i915_power_well *power_well)
1692 {
1693         WARN_ON_ONCE(power_well->id != PIPE_A);
1694
1695         chv_set_pipe_power_well(dev_priv, power_well, true);
1696
1697         vlv_display_power_well_init(dev_priv);
1698 }
1699
1700 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1701                                         struct i915_power_well *power_well)
1702 {
1703         WARN_ON_ONCE(power_well->id != PIPE_A);
1704
1705         vlv_display_power_well_deinit(dev_priv);
1706
1707         chv_set_pipe_power_well(dev_priv, power_well, false);
1708 }
1709
1710 static void
1711 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1712                                  enum intel_display_power_domain domain)
1713 {
1714         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1715         struct i915_power_well *power_well;
1716
1717         for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1718                 intel_power_well_get(dev_priv, power_well);
1719
1720         power_domains->domain_use_count[domain]++;
1721 }
1722
1723 /**
1724  * intel_display_power_get - grab a power domain reference
1725  * @dev_priv: i915 device instance
1726  * @domain: power domain to reference
1727  *
1728  * This function grabs a power domain reference for @domain and ensures that the
1729  * power domain and all its parents are powered up. Therefore users should only
1730  * grab a reference to the innermost power domain they need.
1731  *
1732  * Any power domain reference obtained by this function must have a symmetric
1733  * call to intel_display_power_put() to release the reference again.
1734  */
1735 void intel_display_power_get(struct drm_i915_private *dev_priv,
1736                              enum intel_display_power_domain domain)
1737 {
1738         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1739
1740         intel_runtime_pm_get(dev_priv);
1741
1742         mutex_lock(&power_domains->lock);
1743
1744         __intel_display_power_get_domain(dev_priv, domain);
1745
1746         mutex_unlock(&power_domains->lock);
1747 }
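
/*
 * Illustrative sketch, not part of the driver: a hypothetical caller keeping
 * the AUX B power domain (chosen arbitrarily) powered up around a hardware
 * access, with the symmetric put described in the kernel-doc above.
 */
static void __maybe_unused
example_display_power_get_put(struct drm_i915_private *dev_priv)
{
        /* Powers up the domain (and parents) and takes a runtime PM reference. */
        intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_B);

        /* ... access hardware that lives in the AUX B domain ... */

        /* Drop the reference; the well may be powered down right away. */
        intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B);
}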
1748
1749 /**
1750  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1751  * @dev_priv: i915 device instance
1752  * @domain: power domain to reference
1753  *
1754  * This function grabs a power domain reference for @domain only if the power
1755  * domain is already enabled and the device is not runtime suspended. It
1756  * returns true if the reference was obtained and false otherwise.
1757  *
1758  * Any reference obtained when this function returns true must have a symmetric
1759  * call to intel_display_power_put() to release the reference again.
1760  */
1761 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1762                                         enum intel_display_power_domain domain)
1763 {
1764         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1765         bool is_enabled;
1766
1767         if (!intel_runtime_pm_get_if_in_use(dev_priv))
1768                 return false;
1769
1770         mutex_lock(&power_domains->lock);
1771
1772         if (__intel_display_power_is_enabled(dev_priv, domain)) {
1773                 __intel_display_power_get_domain(dev_priv, domain);
1774                 is_enabled = true;
1775         } else {
1776                 is_enabled = false;
1777         }
1778
1779         mutex_unlock(&power_domains->lock);
1780
1781         if (!is_enabled)
1782                 intel_runtime_pm_put(dev_priv);
1783
1784         return is_enabled;
1785 }
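
/*
 * Illustrative sketch, not part of the driver: a hypothetical readout helper
 * that must not wake the hardware itself. The pipe A domain is only an
 * example; when the function returns false nothing was powered up and no
 * put is needed.
 */
static bool __maybe_unused
example_display_power_get_if_enabled(struct drm_i915_private *dev_priv)
{
        bool active;

        if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
                return false;

        /* The domain is guaranteed to stay powered until the put below. */
        active = true;  /* e.g. read pipe A state here */

        intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);

        return active;
}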
1786
1787 /**
1788  * intel_display_power_put - release a power domain reference
1789  * @dev_priv: i915 device instance
1790  * @domain: power domain to reference
1791  *
1792  * This function drops the power domain reference obtained by
1793  * intel_display_power_get() and might power down the corresponding hardware
1794  * block right away if this is the last reference.
1795  */
1796 void intel_display_power_put(struct drm_i915_private *dev_priv,
1797                              enum intel_display_power_domain domain)
1798 {
1799         struct i915_power_domains *power_domains;
1800         struct i915_power_well *power_well;
1801
1802         power_domains = &dev_priv->power_domains;
1803
1804         mutex_lock(&power_domains->lock);
1805
1806         WARN(!power_domains->domain_use_count[domain],
1807              "Use count on domain %s is already zero\n",
1808              intel_display_power_domain_str(domain));
1809         power_domains->domain_use_count[domain]--;
1810
1811         for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1812                 intel_power_well_put(dev_priv, power_well);
1813
1814         mutex_unlock(&power_domains->lock);
1815
1816         intel_runtime_pm_put(dev_priv);
1817 }
1818
1819 #define HSW_DISPLAY_POWER_DOMAINS (                     \
1820         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
1821         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
1822         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |             \
1823         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
1824         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
1825         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
1826         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
1827         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
1828         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
1829         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
1830         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
1831         BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
1832         BIT_ULL(POWER_DOMAIN_VGA) |                             \
1833         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
1834         BIT_ULL(POWER_DOMAIN_INIT))
1835
1836 #define BDW_DISPLAY_POWER_DOMAINS (                     \
1837         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
1838         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
1839         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
1840         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
1841         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
1842         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
1843         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
1844         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
1845         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
1846         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
1847         BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
1848         BIT_ULL(POWER_DOMAIN_VGA) |                             \
1849         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
1850         BIT_ULL(POWER_DOMAIN_INIT))
1851
1852 #define VLV_DISPLAY_POWER_DOMAINS (             \
1853         BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
1854         BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
1855         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
1856         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
1857         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
1858         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
1859         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
1860         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
1861         BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
1862         BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
1863         BIT_ULL(POWER_DOMAIN_VGA) |                     \
1864         BIT_ULL(POWER_DOMAIN_AUDIO) |           \
1865         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
1866         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
1867         BIT_ULL(POWER_DOMAIN_GMBUS) |           \
1868         BIT_ULL(POWER_DOMAIN_INIT))
1869
1870 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
1871         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
1872         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
1873         BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
1874         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
1875         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
1876         BIT_ULL(POWER_DOMAIN_INIT))
1877
1878 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
1879         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
1880         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
1881         BIT_ULL(POWER_DOMAIN_INIT))
1882
1883 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
1884         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
1885         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
1886         BIT_ULL(POWER_DOMAIN_INIT))
1887
1888 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
1889         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
1890         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
1891         BIT_ULL(POWER_DOMAIN_INIT))
1892
1893 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
1894         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
1895         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
1896         BIT_ULL(POWER_DOMAIN_INIT))
1897
1898 #define CHV_DISPLAY_POWER_DOMAINS (             \
1899         BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
1900         BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
1901         BIT_ULL(POWER_DOMAIN_PIPE_C) |          \
1902         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
1903         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
1904         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
1905         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
1906         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
1907         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |    \
1908         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
1909         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
1910         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
1911         BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
1912         BIT_ULL(POWER_DOMAIN_VGA) |                     \
1913         BIT_ULL(POWER_DOMAIN_AUDIO) |           \
1914         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
1915         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
1916         BIT_ULL(POWER_DOMAIN_AUX_D) |           \
1917         BIT_ULL(POWER_DOMAIN_GMBUS) |           \
1918         BIT_ULL(POWER_DOMAIN_INIT))
1919
1920 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (         \
1921         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
1922         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
1923         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
1924         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
1925         BIT_ULL(POWER_DOMAIN_INIT))
1926
1927 #define CHV_DPIO_CMN_D_POWER_DOMAINS (          \
1928         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
1929         BIT_ULL(POWER_DOMAIN_AUX_D) |           \
1930         BIT_ULL(POWER_DOMAIN_INIT))
1931
1932 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1933         .sync_hw = i9xx_power_well_sync_hw_noop,
1934         .enable = i9xx_always_on_power_well_noop,
1935         .disable = i9xx_always_on_power_well_noop,
1936         .is_enabled = i9xx_always_on_power_well_enabled,
1937 };
1938
1939 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
1940         .sync_hw = i9xx_power_well_sync_hw_noop,
1941         .enable = chv_pipe_power_well_enable,
1942         .disable = chv_pipe_power_well_disable,
1943         .is_enabled = chv_pipe_power_well_enabled,
1944 };
1945
1946 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1947         .sync_hw = i9xx_power_well_sync_hw_noop,
1948         .enable = chv_dpio_cmn_power_well_enable,
1949         .disable = chv_dpio_cmn_power_well_disable,
1950         .is_enabled = vlv_power_well_enabled,
1951 };
1952
1953 static struct i915_power_well i9xx_always_on_power_well[] = {
1954         {
1955                 .name = "always-on",
1956                 .always_on = 1,
1957                 .domains = POWER_DOMAIN_MASK,
1958                 .ops = &i9xx_always_on_power_well_ops,
1959         },
1960 };
1961
1962 static const struct i915_power_well_ops hsw_power_well_ops = {
1963         .sync_hw = hsw_power_well_sync_hw,
1964         .enable = hsw_power_well_enable,
1965         .disable = hsw_power_well_disable,
1966         .is_enabled = hsw_power_well_enabled,
1967 };
1968
1969 static const struct i915_power_well_ops skl_power_well_ops = {
1970         .sync_hw = skl_power_well_sync_hw,
1971         .enable = skl_power_well_enable,
1972         .disable = skl_power_well_disable,
1973         .is_enabled = skl_power_well_enabled,
1974 };
1975
1976 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1977         .sync_hw = i9xx_power_well_sync_hw_noop,
1978         .enable = gen9_dc_off_power_well_enable,
1979         .disable = gen9_dc_off_power_well_disable,
1980         .is_enabled = gen9_dc_off_power_well_enabled,
1981 };
1982
1983 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
1984         .sync_hw = i9xx_power_well_sync_hw_noop,
1985         .enable = bxt_dpio_cmn_power_well_enable,
1986         .disable = bxt_dpio_cmn_power_well_disable,
1987         .is_enabled = bxt_dpio_cmn_power_well_enabled,
1988 };
1989
1990 static struct i915_power_well hsw_power_wells[] = {
1991         {
1992                 .name = "always-on",
1993                 .always_on = 1,
1994                 .domains = POWER_DOMAIN_MASK,
1995                 .ops = &i9xx_always_on_power_well_ops,
1996         },
1997         {
1998                 .name = "display",
1999                 .domains = HSW_DISPLAY_POWER_DOMAINS,
2000                 .ops = &hsw_power_well_ops,
2001         },
2002 };
2003
2004 static struct i915_power_well bdw_power_wells[] = {
2005         {
2006                 .name = "always-on",
2007                 .always_on = 1,
2008                 .domains = POWER_DOMAIN_MASK,
2009                 .ops = &i9xx_always_on_power_well_ops,
2010         },
2011         {
2012                 .name = "display",
2013                 .domains = BDW_DISPLAY_POWER_DOMAINS,
2014                 .ops = &hsw_power_well_ops,
2015         },
2016 };
2017
2018 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2019         .sync_hw = i9xx_power_well_sync_hw_noop,
2020         .enable = vlv_display_power_well_enable,
2021         .disable = vlv_display_power_well_disable,
2022         .is_enabled = vlv_power_well_enabled,
2023 };
2024
2025 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2026         .sync_hw = i9xx_power_well_sync_hw_noop,
2027         .enable = vlv_dpio_cmn_power_well_enable,
2028         .disable = vlv_dpio_cmn_power_well_disable,
2029         .is_enabled = vlv_power_well_enabled,
2030 };
2031
2032 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2033         .sync_hw = i9xx_power_well_sync_hw_noop,
2034         .enable = vlv_power_well_enable,
2035         .disable = vlv_power_well_disable,
2036         .is_enabled = vlv_power_well_enabled,
2037 };
2038
2039 static struct i915_power_well vlv_power_wells[] = {
2040         {
2041                 .name = "always-on",
2042                 .always_on = 1,
2043                 .domains = POWER_DOMAIN_MASK,
2044                 .ops = &i9xx_always_on_power_well_ops,
2045                 .id = PUNIT_POWER_WELL_ALWAYS_ON,
2046         },
2047         {
2048                 .name = "display",
2049                 .domains = VLV_DISPLAY_POWER_DOMAINS,
2050                 .id = PUNIT_POWER_WELL_DISP2D,
2051                 .ops = &vlv_display_power_well_ops,
2052         },
2053         {
2054                 .name = "dpio-tx-b-01",
2055                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2056                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2057                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2058                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2059                 .ops = &vlv_dpio_power_well_ops,
2060                 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
2061         },
2062         {
2063                 .name = "dpio-tx-b-23",
2064                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2065                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2066                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2067                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2068                 .ops = &vlv_dpio_power_well_ops,
2069                 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
2070         },
2071         {
2072                 .name = "dpio-tx-c-01",
2073                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2074                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2075                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2076                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2077                 .ops = &vlv_dpio_power_well_ops,
2078                 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
2079         },
2080         {
2081                 .name = "dpio-tx-c-23",
2082                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2083                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2084                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2085                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2086                 .ops = &vlv_dpio_power_well_ops,
2087                 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
2088         },
2089         {
2090                 .name = "dpio-common",
2091                 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2092                 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2093                 .ops = &vlv_dpio_cmn_power_well_ops,
2094         },
2095 };
2096
2097 static struct i915_power_well chv_power_wells[] = {
2098         {
2099                 .name = "always-on",
2100                 .always_on = 1,
2101                 .domains = POWER_DOMAIN_MASK,
2102                 .ops = &i9xx_always_on_power_well_ops,
2103         },
2104         {
2105                 .name = "display",
2106                 /*
2107                  * Pipe A power well is the new disp2d well. Pipe B and C
2108                  * power wells don't actually exist. Pipe A power well is
2109                  * required for any pipe to work.
2110                  */
2111                 .domains = CHV_DISPLAY_POWER_DOMAINS,
2112                 .id = PIPE_A,
2113                 .ops = &chv_pipe_power_well_ops,
2114         },
2115         {
2116                 .name = "dpio-common-bc",
2117                 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2118                 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2119                 .ops = &chv_dpio_cmn_power_well_ops,
2120         },
2121         {
2122                 .name = "dpio-common-d",
2123                 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2124                 .id = PUNIT_POWER_WELL_DPIO_CMN_D,
2125                 .ops = &chv_dpio_cmn_power_well_ops,
2126         },
2127 };
2128
2129 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2130                                     int power_well_id)
2131 {
2132         struct i915_power_well *power_well;
2133         bool ret;
2134
2135         power_well = lookup_power_well(dev_priv, power_well_id);
2136         ret = power_well->ops->is_enabled(dev_priv, power_well);
2137
2138         return ret;
2139 }
2140
2141 static struct i915_power_well skl_power_wells[] = {
2142         {
2143                 .name = "always-on",
2144                 .always_on = 1,
2145                 .domains = POWER_DOMAIN_MASK,
2146                 .ops = &i9xx_always_on_power_well_ops,
2147                 .id = SKL_DISP_PW_ALWAYS_ON,
2148         },
2149         {
2150                 .name = "power well 1",
2151                 /* Handled by the DMC firmware */
2152                 .domains = 0,
2153                 .ops = &skl_power_well_ops,
2154                 .id = SKL_DISP_PW_1,
2155         },
2156         {
2157                 .name = "MISC IO power well",
2158                 /* Handled by the DMC firmware */
2159                 .domains = 0,
2160                 .ops = &skl_power_well_ops,
2161                 .id = SKL_DISP_PW_MISC_IO,
2162         },
2163         {
2164                 .name = "DC off",
2165                 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2166                 .ops = &gen9_dc_off_power_well_ops,
2167                 .id = SKL_DISP_PW_DC_OFF,
2168         },
2169         {
2170                 .name = "power well 2",
2171                 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2172                 .ops = &skl_power_well_ops,
2173                 .id = SKL_DISP_PW_2,
2174         },
2175         {
2176                 .name = "DDI A/E IO power well",
2177                 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2178                 .ops = &skl_power_well_ops,
2179                 .id = SKL_DISP_PW_DDI_A_E,
2180         },
2181         {
2182                 .name = "DDI B IO power well",
2183                 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2184                 .ops = &skl_power_well_ops,
2185                 .id = SKL_DISP_PW_DDI_B,
2186         },
2187         {
2188                 .name = "DDI C IO power well",
2189                 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2190                 .ops = &skl_power_well_ops,
2191                 .id = SKL_DISP_PW_DDI_C,
2192         },
2193         {
2194                 .name = "DDI D IO power well",
2195                 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2196                 .ops = &skl_power_well_ops,
2197                 .id = SKL_DISP_PW_DDI_D,
2198         },
2199 };
2200
2201 static struct i915_power_well bxt_power_wells[] = {
2202         {
2203                 .name = "always-on",
2204                 .always_on = 1,
2205                 .domains = POWER_DOMAIN_MASK,
2206                 .ops = &i9xx_always_on_power_well_ops,
2207         },
2208         {
2209                 .name = "power well 1",
2210                 .domains = 0,
2211                 .ops = &skl_power_well_ops,
2212                 .id = SKL_DISP_PW_1,
2213         },
2214         {
2215                 .name = "DC off",
2216                 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2217                 .ops = &gen9_dc_off_power_well_ops,
2218                 .id = SKL_DISP_PW_DC_OFF,
2219         },
2220         {
2221                 .name = "power well 2",
2222                 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2223                 .ops = &skl_power_well_ops,
2224                 .id = SKL_DISP_PW_2,
2225         },
2226         {
2227                 .name = "dpio-common-a",
2228                 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2229                 .ops = &bxt_dpio_cmn_power_well_ops,
2230                 .id = BXT_DPIO_CMN_A,
2231                 .data = DPIO_PHY1,
2232         },
2233         {
2234                 .name = "dpio-common-bc",
2235                 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2236                 .ops = &bxt_dpio_cmn_power_well_ops,
2237                 .id = BXT_DPIO_CMN_BC,
2238                 .data = DPIO_PHY0,
2239         },
2240 };
2241
2242 static struct i915_power_well glk_power_wells[] = {
2243         {
2244                 .name = "always-on",
2245                 .always_on = 1,
2246                 .domains = POWER_DOMAIN_MASK,
2247                 .ops = &i9xx_always_on_power_well_ops,
2248         },
2249         {
2250                 .name = "power well 1",
2251                 /* Handled by the DMC firmware */
2252                 .domains = 0,
2253                 .ops = &skl_power_well_ops,
2254                 .id = SKL_DISP_PW_1,
2255         },
2256         {
2257                 .name = "DC off",
2258                 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2259                 .ops = &gen9_dc_off_power_well_ops,
2260                 .id = SKL_DISP_PW_DC_OFF,
2261         },
2262         {
2263                 .name = "power well 2",
2264                 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2265                 .ops = &skl_power_well_ops,
2266                 .id = SKL_DISP_PW_2,
2267         },
2268         {
2269                 .name = "dpio-common-a",
2270                 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2271                 .ops = &bxt_dpio_cmn_power_well_ops,
2272                 .id = BXT_DPIO_CMN_A,
2273                 .data = DPIO_PHY1,
2274         },
2275         {
2276                 .name = "dpio-common-b",
2277                 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2278                 .ops = &bxt_dpio_cmn_power_well_ops,
2279                 .id = BXT_DPIO_CMN_BC,
2280                 .data = DPIO_PHY0,
2281         },
2282         {
2283                 .name = "dpio-common-c",
2284                 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2285                 .ops = &bxt_dpio_cmn_power_well_ops,
2286                 .id = GLK_DPIO_CMN_C,
2287                 .data = DPIO_PHY2,
2288         },
2289         {
2290                 .name = "AUX A",
2291                 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2292                 .ops = &skl_power_well_ops,
2293                 .id = GLK_DISP_PW_AUX_A,
2294         },
2295         {
2296                 .name = "AUX B",
2297                 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2298                 .ops = &skl_power_well_ops,
2299                 .id = GLK_DISP_PW_AUX_B,
2300         },
2301         {
2302                 .name = "AUX C",
2303                 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2304                 .ops = &skl_power_well_ops,
2305                 .id = GLK_DISP_PW_AUX_C,
2306         },
2307         {
2308                 .name = "DDI A IO power well",
2309                 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
2310                 .ops = &skl_power_well_ops,
2311                 .id = GLK_DISP_PW_DDI_A,
2312         },
2313         {
2314                 .name = "DDI B IO power well",
2315                 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2316                 .ops = &skl_power_well_ops,
2317                 .id = SKL_DISP_PW_DDI_B,
2318         },
2319         {
2320                 .name = "DDI C IO power well",
2321                 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2322                 .ops = &skl_power_well_ops,
2323                 .id = SKL_DISP_PW_DDI_C,
2324         },
2325 };
2326
2327 static struct i915_power_well cnl_power_wells[] = {
2328         {
2329                 .name = "always-on",
2330                 .always_on = 1,
2331                 .domains = POWER_DOMAIN_MASK,
2332                 .ops = &i9xx_always_on_power_well_ops,
2333         },
2334         {
2335                 .name = "power well 1",
2336                 /* Handled by the DMC firmware */
2337                 .domains = 0,
2338                 .ops = &skl_power_well_ops,
2339                 .id = SKL_DISP_PW_1,
2340         },
2341         {
2342                 .name = "AUX A",
2343                 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
2344                 .ops = &skl_power_well_ops,
2345                 .id = CNL_DISP_PW_AUX_A,
2346         },
2347         {
2348                 .name = "AUX B",
2349                 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
2350                 .ops = &skl_power_well_ops,
2351                 .id = CNL_DISP_PW_AUX_B,
2352         },
2353         {
2354                 .name = "AUX C",
2355                 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
2356                 .ops = &skl_power_well_ops,
2357                 .id = CNL_DISP_PW_AUX_C,
2358         },
2359         {
2360                 .name = "AUX D",
2361                 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
2362                 .ops = &skl_power_well_ops,
2363                 .id = CNL_DISP_PW_AUX_D,
2364         },
2365         {
2366                 .name = "DC off",
2367                 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2368                 .ops = &gen9_dc_off_power_well_ops,
2369                 .id = SKL_DISP_PW_DC_OFF,
2370         },
2371         {
2372                 .name = "power well 2",
2373                 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2374                 .ops = &skl_power_well_ops,
2375                 .id = SKL_DISP_PW_2,
2376         },
2377         {
2378                 .name = "DDI A IO power well",
2379                 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
2380                 .ops = &skl_power_well_ops,
2381                 .id = CNL_DISP_PW_DDI_A,
2382         },
2383         {
2384                 .name = "DDI B IO power well",
2385                 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
2386                 .ops = &skl_power_well_ops,
2387                 .id = SKL_DISP_PW_DDI_B,
2388         },
2389         {
2390                 .name = "DDI C IO power well",
2391                 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
2392                 .ops = &skl_power_well_ops,
2393                 .id = SKL_DISP_PW_DDI_C,
2394         },
2395         {
2396                 .name = "DDI D IO power well",
2397                 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
2398                 .ops = &skl_power_well_ops,
2399                 .id = SKL_DISP_PW_DDI_D,
2400         },
2401 };
2402
2403 static int
2404 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
2405                                    int disable_power_well)
2406 {
2407         if (disable_power_well >= 0)
2408                 return !!disable_power_well;
2409
2410         return 1;
2411 }
2412
2413 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2414                                     int enable_dc)
2415 {
2416         uint32_t mask;
2417         int requested_dc;
2418         int max_dc;
2419
2420         if (IS_GEN9_BC(dev_priv)) {
2421                 max_dc = 2;
2422                 mask = 0;
2423         } else if (IS_GEN9_LP(dev_priv)) {
2424                 max_dc = 1;
2425                 /*
2426                  * DC9 has a separate HW flow from the rest of the DC states,
2427                  * not depending on the DMC firmware. It's needed by system
2428                  * suspend/resume, so allow it unconditionally.
2429                  */
2430                 mask = DC_STATE_EN_DC9;
2431         } else {
2432                 max_dc = 0;
2433                 mask = 0;
2434         }
2435
2436         if (!i915.disable_power_well)
2437                 max_dc = 0;
2438
2439         if (enable_dc >= 0 && enable_dc <= max_dc) {
2440                 requested_dc = enable_dc;
2441         } else if (enable_dc == -1) {
2442                 requested_dc = max_dc;
2443         } else if (enable_dc > max_dc && enable_dc <= 2) {
2444                 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2445                               enable_dc, max_dc);
2446                 requested_dc = max_dc;
2447         } else {
2448                 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2449                 requested_dc = max_dc;
2450         }
2451
2452         if (requested_dc > 1)
2453                 mask |= DC_STATE_EN_UPTO_DC6;
2454         if (requested_dc > 0)
2455                 mask |= DC_STATE_EN_UPTO_DC5;
2456
2457         DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2458
2459         return mask;
2460 }
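
/*
 * Worked example (illustrative): on a GEN9_BC platform with
 * i915.disable_power_well at its sanitized default of 1 and enable_dc at its
 * default of -1, max_dc is 2, so requested_dc is 2 and the function returns
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6.
 */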
2461
2462 #define set_power_wells(power_domains, __power_wells) ({                \
2463         (power_domains)->power_wells = (__power_wells);                 \
2464         (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
2465 })
2466
2467 /**
2468  * intel_power_domains_init - initializes the power domain structures
2469  * @dev_priv: i915 device instance
2470  *
2471  * Initializes the power domain structures for @dev_priv depending upon the
2472  * supported platform.
2473  */
2474 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2475 {
2476         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2477
2478         i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
2479                                                      i915.disable_power_well);
2480         dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
2481                                                             i915.enable_dc);
2482
2483         BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2484
2485         mutex_init(&power_domains->lock);
2486
2487         /*
2488          * The enabling order will be from lower to higher indexed wells,
2489          * the disabling order is reversed.
2490          */
2491         if (IS_HASWELL(dev_priv)) {
2492                 set_power_wells(power_domains, hsw_power_wells);
2493         } else if (IS_BROADWELL(dev_priv)) {
2494                 set_power_wells(power_domains, bdw_power_wells);
2495         } else if (IS_GEN9_BC(dev_priv)) {
2496                 set_power_wells(power_domains, skl_power_wells);
2497         } else if (IS_CANNONLAKE(dev_priv)) {
2498                 set_power_wells(power_domains, cnl_power_wells);
2499         } else if (IS_BROXTON(dev_priv)) {
2500                 set_power_wells(power_domains, bxt_power_wells);
2501         } else if (IS_GEMINILAKE(dev_priv)) {
2502                 set_power_wells(power_domains, glk_power_wells);
2503         } else if (IS_CHERRYVIEW(dev_priv)) {
2504                 set_power_wells(power_domains, chv_power_wells);
2505         } else if (IS_VALLEYVIEW(dev_priv)) {
2506                 set_power_wells(power_domains, vlv_power_wells);
2507         } else {
2508                 set_power_wells(power_domains, i9xx_always_on_power_well);
2509         }
2510
2511         return 0;
2512 }
2513
2514 /**
2515  * intel_power_domains_fini - finalizes the power domain structures
2516  * @dev_priv: i915 device instance
2517  *
2518  * Finalizes the power domain structures for @dev_priv depending upon the
2519  * supported platform. This function also disables runtime pm and ensures that
2520  * the device stays powered up so that the driver can be reloaded.
2521  */
2522 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2523 {
2524         struct device *kdev = &dev_priv->drm.pdev->dev;
2525
2526         /*
2527          * The i915.ko module is still not prepared to be loaded when
2528          * the power well is not enabled, so just enable it in case
2529          * we're going to unload/reload.
2530          * The following also reacquires the RPM reference the core passed
2531          * to the driver during loading, which is dropped in
2532          * intel_runtime_pm_enable(). We have to hand back the control of the
2533          * device to the core with this reference held.
2534          */
2535         intel_display_set_init_power(dev_priv, true);
2536
2537         /* Remove the refcount we took to keep power well support disabled. */
2538         if (!i915.disable_power_well)
2539                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2540
2541         /*
2542          * Remove the refcount we took in intel_runtime_pm_enable() in case
2543          * the platform doesn't support runtime PM.
2544          */
2545         if (!HAS_RUNTIME_PM(dev_priv))
2546                 pm_runtime_put(kdev);
2547 }
2548
2549 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2550 {
2551         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2552         struct i915_power_well *power_well;
2553
2554         mutex_lock(&power_domains->lock);
2555         for_each_power_well(dev_priv, power_well) {
2556                 power_well->ops->sync_hw(dev_priv, power_well);
2557                 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2558                                                                      power_well);
2559         }
2560         mutex_unlock(&power_domains->lock);
2561 }
2562
2563 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2564 {
2565         I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2566         POSTING_READ(DBUF_CTL);
2567
2568         udelay(10);
2569
2570         if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2571                 DRM_ERROR("DBuf power enable timeout\n");
2572 }
2573
2574 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2575 {
2576         I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2577         POSTING_READ(DBUF_CTL);
2578
2579         udelay(10);
2580
2581         if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2582                 DRM_ERROR("DBuf power disable timeout!\n");
2583 }
2584
2585 static void skl_display_core_init(struct drm_i915_private *dev_priv,
2586                                    bool resume)
2587 {
2588         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2589         struct i915_power_well *well;
2590         uint32_t val;
2591
2592         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2593
2594         /* enable PCH reset handshake */
2595         val = I915_READ(HSW_NDE_RSTWRN_OPT);
2596         I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
2597
2598         /* enable PG1 and Misc I/O */
2599         mutex_lock(&power_domains->lock);
2600
2601         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2602         intel_power_well_enable(dev_priv, well);
2603
2604         well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2605         intel_power_well_enable(dev_priv, well);
2606
2607         mutex_unlock(&power_domains->lock);
2608
2609         skl_init_cdclk(dev_priv);
2610
2611         gen9_dbuf_enable(dev_priv);
2612
2613         if (resume && dev_priv->csr.dmc_payload)
2614                 intel_csr_load_program(dev_priv);
2615 }
2616
2617 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2618 {
2619         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2620         struct i915_power_well *well;
2621
2622         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2623
2624         gen9_dbuf_disable(dev_priv);
2625
2626         skl_uninit_cdclk(dev_priv);
2627
2628         /* The spec doesn't call for removing the reset handshake flag */
2629         /* disable PG1 and Misc I/O */
2630
2631         mutex_lock(&power_domains->lock);
2632
2633         well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2634         intel_power_well_disable(dev_priv, well);
2635
2636         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2637         intel_power_well_disable(dev_priv, well);
2638
2639         mutex_unlock(&power_domains->lock);
2640 }
2641
2642 void bxt_display_core_init(struct drm_i915_private *dev_priv,
2643                            bool resume)
2644 {
2645         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2646         struct i915_power_well *well;
2647         uint32_t val;
2648
2649         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2650
2651         /*
2652          * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
2653          * or else the reset will hang because there is no PCH to respond.
2654          * Move the handshake programming to initialization sequence.
2655          * Previously was left up to BIOS.
2656          */
2657         val = I915_READ(HSW_NDE_RSTWRN_OPT);
2658         val &= ~RESET_PCH_HANDSHAKE_ENABLE;
2659         I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2660
2661         /* Enable PG1 */
2662         mutex_lock(&power_domains->lock);
2663
2664         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2665         intel_power_well_enable(dev_priv, well);
2666
2667         mutex_unlock(&power_domains->lock);
2668
2669         bxt_init_cdclk(dev_priv);
2670
2671         gen9_dbuf_enable(dev_priv);
2672
2673         if (resume && dev_priv->csr.dmc_payload)
2674                 intel_csr_load_program(dev_priv);
2675 }
2676
2677 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2678 {
2679         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2680         struct i915_power_well *well;
2681
2682         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2683
2684         gen9_dbuf_disable(dev_priv);
2685
2686         bxt_uninit_cdclk(dev_priv);
2687
2688         /* The spec doesn't call for removing the reset handshake flag */
2689
2690         /* Disable PG1 */
2691         mutex_lock(&power_domains->lock);
2692
2693         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2694         intel_power_well_disable(dev_priv, well);
2695
2696         mutex_unlock(&power_domains->lock);
2697 }
2698
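/*
 * The compensation values below are indexed by the PROCESS_INFO and
 * VOLTAGE_INFO fields read back from CNL_PORT_COMP_DW3: CNL_PROCMON_IDX()
 * masks out those two fields and shifts them down into a small dense array
 * index, and NUM_CNL_PROCMON is the largest possible index plus one.
 * Combinations without an entry stay zero-initialized, which the WARN_ON()
 * on dw10 in cnl_display_core_init() flags at runtime.
 */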
2699 #define CNL_PROCMON_IDX(val) \
2700         (((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT)
2701 #define NUM_CNL_PROCMON \
2702         (CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1)
2703
2704 static const struct cnl_procmon {
2705         u32 dw1, dw9, dw10;
2706 } cnl_procmon_values[NUM_CNL_PROCMON] = {
2707         [CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] =
2708                 { .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
2709         [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] =
2710                 { .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
2711         [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] =
2712                 { .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
2713         [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] =
2714                 { .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
2715         [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] =
2716                 { .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
2717 };
2718
2719 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
2720 {
2721         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2722         const struct cnl_procmon *procmon;
2723         struct i915_power_well *well;
2724         u32 val;
2725
2726         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2727
2728         /* 1. Enable PCH Reset Handshake */
2729         val = I915_READ(HSW_NDE_RSTWRN_OPT);
2730         val |= RESET_PCH_HANDSHAKE_ENABLE;
2731         I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2732
2733         /* 2. Enable Comp */
2734         val = I915_READ(CHICKEN_MISC_2);
2735         val &= ~COMP_PWR_DOWN;
2736         I915_WRITE(CHICKEN_MISC_2, val);
2737
2738         val = I915_READ(CNL_PORT_COMP_DW3);
2739         procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)];
2740
2741         WARN_ON(procmon->dw10 == 0);
2742
2743         val = I915_READ(CNL_PORT_COMP_DW1);
2744         val &= ~((0xff << 16) | 0xff);
2745         val |= procmon->dw1;
2746         I915_WRITE(CNL_PORT_COMP_DW1, val);
2747
2748         I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
2749         I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
2750
2751         val = I915_READ(CNL_PORT_COMP_DW0);
2752         val |= COMP_INIT;
2753         I915_WRITE(CNL_PORT_COMP_DW0, val);
2754
2755         /* 3. Enable CL power down */
2756         val = I915_READ(CNL_PORT_CL1CM_DW5);
2757         val |= CL_POWER_DOWN_ENABLE;
2758         I915_WRITE(CNL_PORT_CL1CM_DW5, val);
2759
2760         /* 4. Enable Power Well 1 (PG1) and Aux IO Power */
2761         mutex_lock(&power_domains->lock);
2762         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2763         intel_power_well_enable(dev_priv, well);
2764         mutex_unlock(&power_domains->lock);
2765
2766         /* 5. Enable CD clock */
2767         cnl_init_cdclk(dev_priv);
2768
2769         /* 6. Enable DBUF */
2770         gen9_dbuf_enable(dev_priv);
2771 }
2772
2773 #undef CNL_PROCMON_IDX
2774 #undef NUM_CNL_PROCMON
2775
2776 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
2777 {
2778         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2779         struct i915_power_well *well;
2780         u32 val;
2781
2782         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2783
2784         /* 1. Disable all display engine functions -> already done */
2785
2786         /* 2. Disable DBUF */
2787         gen9_dbuf_disable(dev_priv);
2788
2789         /* 3. Disable CD clock */
2790         cnl_uninit_cdclk(dev_priv);
2791
2792         /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
2793         mutex_lock(&power_domains->lock);
2794         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2795         intel_power_well_disable(dev_priv, well);
2796         mutex_unlock(&power_domains->lock);
2797
2798         /* 5. Disable Comp */
2799         val = I915_READ(CHICKEN_MISC_2);
2800         val |= COMP_PWR_DOWN;
2801         I915_WRITE(CHICKEN_MISC_2, val);
2802 }
2803
2804 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2805 {
2806         struct i915_power_well *cmn_bc =
2807                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2808         struct i915_power_well *cmn_d =
2809                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
2810
2811         /*
2812          * DISPLAY_PHY_CONTROL can get corrupted if read. As a
2813          * workaround never ever read DISPLAY_PHY_CONTROL, and
2814          * instead maintain a shadow copy ourselves. Use the actual
2815          * power well state and lane status to reconstruct the
2816          * expected initial value.
2817          */
2818         dev_priv->chv_phy_control =
2819                 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
2820                 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
2821                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
2822                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
2823                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
2824
2825         /*
2826          * If all lanes are disabled we leave the override disabled
2827          * with all power down bits cleared to match the state we
2828          * would use after disabling the port. Otherwise enable the
2829          * override and set the lane powerdown bits according to the
2830          * current lane status.
2831          */
2832         if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
2833                 uint32_t status = I915_READ(DPLL(PIPE_A));
2834                 unsigned int mask;
2835
2836                 mask = status & DPLL_PORTB_READY_MASK;
2837                 if (mask == 0xf)
2838                         mask = 0x0;
2839                 else
2840                         dev_priv->chv_phy_control |=
2841                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
2842
2843                 dev_priv->chv_phy_control |=
2844                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
2845
2846                 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
2847                 if (mask == 0xf)
2848                         mask = 0x0;
2849                 else
2850                         dev_priv->chv_phy_control |=
2851                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
2852
2853                 dev_priv->chv_phy_control |=
2854                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
2855
2856                 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
2857
2858                 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
2859         } else {
2860                 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
2861         }
2862
2863         if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
2864                 uint32_t status = I915_READ(DPIO_PHY_STATUS);
2865                 unsigned int mask;
2866
2867                 mask = status & DPLL_PORTD_READY_MASK;
2868
2869                 if (mask == 0xf)
2870                         mask = 0x0;
2871                 else
2872                         dev_priv->chv_phy_control |=
2873                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
2874
2875                 dev_priv->chv_phy_control |=
2876                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
2877
2878                 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
2879
2880                 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
2881         } else {
2882                 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
2883         }
2884
2885         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2886
2887         DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2888                       dev_priv->chv_phy_control);
2889 }
2890
2891 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2892 {
2893         struct i915_power_well *cmn =
2894                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2895         struct i915_power_well *disp2d =
2896                 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
2897
2898         /* If the display might already be active, skip this */
2899         if (cmn->ops->is_enabled(dev_priv, cmn) &&
2900             disp2d->ops->is_enabled(dev_priv, disp2d) &&
2901             I915_READ(DPIO_CTL) & DPIO_CMNRST)
2902                 return;
2903
2904         DRM_DEBUG_KMS("toggling display PHY side reset\n");
2905
2906         /* cmnlane needs DPLL registers */
2907         disp2d->ops->enable(dev_priv, disp2d);
2908
2909         /*
2910          * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
2911          * Need to assert and de-assert PHY SB reset by gating the
2912          * common lane power, then un-gating it.
2913          * Simply un-gating isn't enough to reset the PHY sufficiently to
2914          * get the ports and lanes running.
2915          */
2916         cmn->ops->disable(dev_priv, cmn);
2917 }
2918
2919 /**
2920  * intel_power_domains_init_hw - initialize hardware power domain state
2921  * @dev_priv: i915 device instance
2922  * @resume: true if called from a resume path
2923  *
2924  * This function initializes the hardware power domain state and enables all
2925  * power wells belonging to the INIT power domain. Power wells in other
2926  * domains (and not in the INIT domain) are referenced or disabled during the
2927  * modeset state HW readout. After that the reference count of each power well
2928  * must match its HW enabled state, see intel_power_domains_verify_state().
2929  */
2930 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2931 {
2932         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2933
2934         power_domains->initializing = true;
2935
2936         if (IS_CANNONLAKE(dev_priv)) {
2937                 cnl_display_core_init(dev_priv, resume);
2938         } else if (IS_GEN9_BC(dev_priv)) {
2939                 skl_display_core_init(dev_priv, resume);
2940         } else if (IS_GEN9_LP(dev_priv)) {
2941                 bxt_display_core_init(dev_priv, resume);
2942         } else if (IS_CHERRYVIEW(dev_priv)) {
2943                 mutex_lock(&power_domains->lock);
2944                 chv_phy_control_init(dev_priv);
2945                 mutex_unlock(&power_domains->lock);
2946         } else if (IS_VALLEYVIEW(dev_priv)) {
2947                 mutex_lock(&power_domains->lock);
2948                 vlv_cmnlane_wa(dev_priv);
2949                 mutex_unlock(&power_domains->lock);
2950         }
2951
2952         /* For now, we need the power well to be always enabled. */
2953         intel_display_set_init_power(dev_priv, true);
2954         /* Keep the power wells on if the user disabled power well support. */
2955         if (!i915.disable_power_well)
2956                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2957         intel_power_domains_sync_hw(dev_priv);
2958         power_domains->initializing = false;
2959 }
2960
2961 /**
2962  * intel_power_domains_suspend - suspend power domain state
2963  * @dev_priv: i915 device instance
2964  *
2965  * This function prepares the hardware power domain state before entering
2966  * system suspend. It must be paired with intel_power_domains_init_hw().
2967  */
2968 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2969 {
2970         /*
2971          * Even if power well support was disabled we still want to disable
2972          * power wells while we are system suspended.
2973          */
2974         if (!i915.disable_power_well)
2975                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2976
2977         if (IS_CANNONLAKE(dev_priv))
2978                 cnl_display_core_uninit(dev_priv);
2979         else if (IS_GEN9_BC(dev_priv))
2980                 skl_display_core_uninit(dev_priv);
2981         else if (IS_GEN9_LP(dev_priv))
2982                 bxt_display_core_uninit(dev_priv);
2983 }
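/*
 * Pairing sketch (the hook names below are hypothetical, for illustration
 * only): a system suspend path is expected to call
 * intel_power_domains_suspend(), and the matching resume path re-runs
 * intel_power_domains_init_hw() with resume=true so the display core init
 * sequence executes again and the DMC firmware is reloaded if present.
 *
 *	static void example_suspend(struct drm_i915_private *dev_priv)
 *	{
 *		intel_power_domains_suspend(dev_priv);
 *	}
 *
 *	static void example_resume(struct drm_i915_private *dev_priv)
 *	{
 *		intel_power_domains_init_hw(dev_priv, true);
 *	}
 */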
2984
2985 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
2986 {
2987         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2988         struct i915_power_well *power_well;
2989
2990         for_each_power_well(dev_priv, power_well) {
2991                 enum intel_display_power_domain domain;
2992
2993                 DRM_DEBUG_DRIVER("%-25s %d\n",
2994                                  power_well->name, power_well->count);
2995
2996                 for_each_power_domain(domain, power_well->domains)
2997                         DRM_DEBUG_DRIVER("  %-23s %d\n",
2998                                          intel_display_power_domain_str(domain),
2999                                          power_domains->domain_use_count[domain]);
3000         }
3001 }
3002
3003 /**
3004  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3005  * @dev_priv: i915 device instance
3006  *
3007  * Verify that the reference count of each power well matches its HW enabled
3008  * state and the total refcount of the domains it belongs to. This must be
3009  * called after modeset HW state sanitization, which is responsible for
3010  * acquiring reference counts for any power wells in use and disabling the
3011  * ones left on by BIOS but not required by any active output.
3012  */
3013 void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3014 {
3015         struct i915_power_domains *power_domains = &dev_priv->power_domains;
3016         struct i915_power_well *power_well;
3017         bool dump_domain_info;
3018
3019         mutex_lock(&power_domains->lock);
3020
3021         dump_domain_info = false;
3022         for_each_power_well(dev_priv, power_well) {
3023                 enum intel_display_power_domain domain;
3024                 int domains_count;
3025                 bool enabled;
3026
3027                 /*
3028                  * Power wells not belonging to any domain (like the MISC_IO
3029                  * and PW1 power wells) are under FW control, so ignore them,
3030                  * since their state can change asynchronously.
3031                  */
3032                 if (!power_well->domains)
3033                         continue;
3034
3035                 enabled = power_well->ops->is_enabled(dev_priv, power_well);
3036                 if ((power_well->count || power_well->always_on) != enabled)
3037                         DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)\n",
3038                                   power_well->name, power_well->count, enabled);
3039
3040                 domains_count = 0;
3041                 for_each_power_domain(domain, power_well->domains)
3042                         domains_count += power_domains->domain_use_count[domain];
3043
3044                 if (power_well->count != domains_count) {
3045                         DRM_ERROR("power well %s refcount/domain refcount mismatch "
3046                                   "(refcount %d/domains refcount %d)\n",
3047                                   power_well->name, power_well->count,
3048                                   domains_count);
3049                         dump_domain_info = true;
3050                 }
3051         }
3052
3053         if (dump_domain_info) {
3054                 static bool dumped;
3055
3056                 if (!dumped) {
3057                         intel_power_domains_dump_info(dev_priv);
3058                         dumped = true;
3059                 }
3060         }
3061
3062         mutex_unlock(&power_domains->lock);
3063 }
3064
3065 /**
3066  * intel_runtime_pm_get - grab a runtime pm reference
3067  * @dev_priv: i915 device instance
3068  *
3069  * This function grabs a device-level runtime pm reference (mostly used for GEM
3070  * code to ensure the GTT or GT is on) and ensures that it is powered up.
3071  *
3072  * Any runtime pm reference obtained by this function must have a symmetric
3073  * call to intel_runtime_pm_put() to release the reference again.
3074  */
3075 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3076 {
3077         struct pci_dev *pdev = dev_priv->drm.pdev;
3078         struct device *kdev = &pdev->dev;
3079         int ret;
3080
3081         ret = pm_runtime_get_sync(kdev);
3082         WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3083
3084         atomic_inc(&dev_priv->pm.wakeref_count);
3085         assert_rpm_wakelock_held(dev_priv);
3086 }
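/*
 * Minimal usage sketch (the caller below is hypothetical): every
 * intel_runtime_pm_get() must be balanced by an intel_runtime_pm_put()
 * once the hardware access is done.
 *
 *	static void example_poke_hw(struct drm_i915_private *dev_priv)
 *	{
 *		intel_runtime_pm_get(dev_priv);
 *		... access registers, the device is guaranteed to be awake ...
 *		intel_runtime_pm_put(dev_priv);
 *	}
 */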
3087
3088 /**
3089  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3090  * @dev_priv: i915 device instance
3091  *
3092  * This function grabs a device-level runtime pm reference if the device is
3093  * already in use and ensures that it is powered up.
3094  *
3095  * Any runtime pm reference obtained by this function must have a symmetric
3096  * call to intel_runtime_pm_put() to release the reference again.
3097  */
3098 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3099 {
3100         struct pci_dev *pdev = dev_priv->drm.pdev;
3101         struct device *kdev = &pdev->dev;
3102
3103         if (IS_ENABLED(CONFIG_PM)) {
3104                 int ret = pm_runtime_get_if_in_use(kdev);
3105
3106                 /*
3107                  * In cases where runtime PM is disabled by the RPM core and we
3108                  * get an -EINVAL return value, we are not supposed to call this
3109                  * function, since the power state is undefined. At the moment this
3110                  * applies to the late/early system suspend/resume handlers.
3111                  */
3112                 WARN_ONCE(ret < 0,
3113                           "pm_runtime_get_if_in_use() failed: %d\n", ret);
3114                 if (ret <= 0)
3115                         return false;
3116         }
3117
3118         atomic_inc(&dev_priv->pm.wakeref_count);
3119         assert_rpm_wakelock_held(dev_priv);
3120
3121         return true;
3122 }
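/*
 * Minimal usage sketch (hypothetical caller): the return value must be
 * checked, and the reference is dropped only when it was actually
 * acquired.
 *
 *	if (!intel_runtime_pm_get_if_in_use(dev_priv))
 *		return;
 *	... touch the hardware ...
 *	intel_runtime_pm_put(dev_priv);
 */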
3123
3124 /**
3125  * intel_runtime_pm_get_noresume - grab a runtime pm reference
3126  * @dev_priv: i915 device instance
3127  *
3128  * This function grabs a device-level runtime pm reference (mostly used for GEM
3129  * code to ensure the GTT or GT is on).
3130  *
3131  * It will _not_ power up the device but instead only check that it's powered
3132  * on.  Therefore it is only valid to call this function from contexts where
3133  * the device is known to be powered up and where trying to power it up would
3134  * result in hilarity and deadlocks. That pretty much means only the system
3135  * suspend/resume code where this is used to grab runtime pm references for
3136  * delayed setup down in work items.
3137  *
3138  * Any runtime pm reference obtained by this function must have a symmetric
3139  * call to intel_runtime_pm_put() to release the reference again.
3140  */
3141 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3142 {
3143         struct pci_dev *pdev = dev_priv->drm.pdev;
3144         struct device *kdev = &pdev->dev;
3145
3146         assert_rpm_wakelock_held(dev_priv);
3147         pm_runtime_get_noresume(kdev);
3148
3149         atomic_inc(&dev_priv->pm.wakeref_count);
3150 }
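/*
 * Minimal usage sketch (hypothetical; the work item and workqueue are
 * assumed to exist in the caller, and a wakeref is already held as
 * required above): take an extra reference without waking the device and
 * hand it to deferred work, whose handler drops it again with
 * intel_runtime_pm_put().
 *
 *	intel_runtime_pm_get_noresume(dev_priv);
 *	queue_work(dev_priv->wq, &work);
 */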
3151
3152 /**
3153  * intel_runtime_pm_put - release a runtime pm reference
3154  * @dev_priv: i915 device instance
3155  *
3156  * This function drops the device-level runtime pm reference obtained by
3157  * intel_runtime_pm_get() and might power down the corresponding
3158  * hardware block right away if this is the last reference.
3159  */
3160 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3161 {
3162         struct pci_dev *pdev = dev_priv->drm.pdev;
3163         struct device *kdev = &pdev->dev;
3164
3165         assert_rpm_wakelock_held(dev_priv);
3166         atomic_dec(&dev_priv->pm.wakeref_count);
3167
3168         pm_runtime_mark_last_busy(kdev);
3169         pm_runtime_put_autosuspend(kdev);
3170 }
3171
3172 /**
3173  * intel_runtime_pm_enable - enable runtime pm
3174  * @dev_priv: i915 device instance
3175  *
3176  * This function enables runtime pm at the end of the driver load sequence.
3177  *
3178  * Note that this function does not currently enable runtime pm for the
3179  * subordinate display power domains. That is only done on the first modeset
3180  * using intel_display_set_init_power().
3181  */
3182 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
3183 {
3184         struct pci_dev *pdev = dev_priv->drm.pdev;
3185         struct device *kdev = &pdev->dev;
3186
3187         pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3188         pm_runtime_mark_last_busy(kdev);
3189
3190         /*
3191          * Take a permanent reference to disable the RPM functionality and drop
3192          * it only when unloading the driver. Use the low level get/put helpers,
3193          * so the driver's own RPM reference tracking asserts also work on
3194          * platforms without RPM support.
3195          */
3196         if (!HAS_RUNTIME_PM(dev_priv)) {
3197                 int ret;
3198
3199                 pm_runtime_dont_use_autosuspend(kdev);
3200                 ret = pm_runtime_get_sync(kdev);
3201                 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3202         } else {
3203                 pm_runtime_use_autosuspend(kdev);
3204         }
3205
3206         /*
3207          * The core calls the driver load handler with an RPM reference held.
3208          * We drop that here and will reacquire it during unloading in
3209          * intel_power_domains_fini().
3210          */
3211         pm_runtime_put_autosuspend(kdev);
3212 }