/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented according
 * to the PSR spec in eDP 1.3. PSR allows the display to go to lower standby
 * states when the system is idle but the display is on, as it eliminates
 * display refresh requests to DDR memory completely as long as the frame
 * buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

#include <drm/drm_atomic_helper.h>

#include "intel_drv.h"
#include "i915_drv.h"

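/*
 * Global PSR policy: the debug field set through debugfs takes precedence;
 * otherwise the enable_psr module parameter decides.
 */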
static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc_params.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

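/* Map a transcoder to its bit shift within the EDP PSR interrupt registers. */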
static int edp_psr_shift(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A:
		return EDP_PSR_TRANSCODER_A_SHIFT;
	case TRANSCODER_B:
		return EDP_PSR_TRANSCODER_B_SHIFT;
	case TRANSCODER_C:
		return EDP_PSR_TRANSCODER_C_SHIFT;
	default:
		MISSING_CASE(cpu_transcoder);
		/* fallthrough */
	case TRANSCODER_EDP:
		return EDP_PSR_TRANSCODER_EDP_SHIFT;
	}
}

void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
	u32 debug_mask, mask;
	enum transcoder cpu_transcoder;
	u32 transcoders = BIT(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	debug_mask = 0;
	mask = 0;
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		mask |= EDP_PSR_ERROR(shift);
		debug_mask |= EDP_PSR_POST_EXIT(shift) |
			      EDP_PSR_PRE_ENTRY(shift);
	}

	if (debug & I915_PSR_DEBUG_IRQ)
		mask |= debug_mask;

	I915_WRITE(EDP_PSR_IMR, ~mask);
}

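/* Decode a PSR_EVENT register value into debug messages, one per event bit. */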
static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();
	u32 mask = 0;

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		if (psr_iir & EDP_PSR_ERROR(shift)) {
			DRM_WARN("[transcoder %s] PSR aux error\n",
				 transcoder_name(cpu_transcoder));

			dev_priv->psr.irq_aux_error = true;

			/*
			 * If this interrupt is not masked it will keep
			 * firing so fast that it prevents the scheduled
			 * work from running.
			 * Also, after a PSR error we don't want to arm PSR
			 * again, so we don't care about unmasking the
			 * interrupt or clearing irq_aux_error here.
			 */
			mask |= EDP_PSR_ERROR(shift);
		}

		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}

	if (mask) {
		mask |= I915_READ(EDP_PSR_IMR);
		I915_WRITE(EDP_PSR_IMR, mask);

		schedule_work(&dev_priv->psr.work);
	}
}

static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if granularity is not required
	 * or if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	WARN_ON(dev_priv->psr.dp);
	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granularity(intel_dp);
		}
	}
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per eDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base,
					crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

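/*
 * Pre-program the AUX message (a native DPCD write of DP_SET_POWER_D0)
 * that the HSW/BDW PSR hardware sends on its own when exiting PSR, reusing
 * the bits computed for DDI_AUX_CTL but keeping only those valid in
 * SRD_AUX_CTL.
 */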
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

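/*
 * Pick the TP1/TP2/TP3 wakeup times for PSR1 from the VBT, rounding each
 * value up to the nearest bucket supported by EDP_PSR_CTL.
 */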
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

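/* Configure and arm PSR1 through EDP_PSR_CTL. */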
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means source has to wait for more than 8
	 * frames, we'll go with 9 frames for now
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

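/* Configure and arm PSR2 (selective update) through EDP_PSR2_CTL. */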
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	/*
	 * FIXME: There is probably an issue in DMC firmwares
	 * (icl_dmc_ver1_07.bin and kbl_dmc_ver1_04.bin at least) that causes
	 * PSR2 SU to fail after exiting DC6 if EDP_PSR_TP1_TP3_SEL is kept in
	 * PSR_CTL, so for now let's work around the issue by clearing PSR_CTL
	 * before enabling PSR2.
	 */
	I915_WRITE(EDP_PSR_CTL, 0);

	I915_WRITE(EDP_PSR2_CTL, val);
}

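/*
 * Check the additional constraints PSR2 has on top of PSR1: sink support,
 * no DSC, platform resolution limits, SU X granularity and pipe CRC.
 */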
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate that the SU block width is a multiple of the
	 * X granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement the transcoder
	 * EDP ones. Since by display design transcoder EDP is tied to port A
	 * we can safely escape based on port A alone.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (INTEL_GEN(dev_priv) >= 9)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
					 enum transcoder cpu_transcoder)
{
	static const i915_reg_t regs[] = {
		[TRANSCODER_A] = CHICKEN_TRANS_A,
		[TRANSCODER_B] = CHICKEN_TRANS_B,
		[TRANSCODER_C] = CHICKEN_TRANS_C,
		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
	};

	WARN_ON(INTEL_GEN(dev_priv) < 9);

	if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
		    !regs[cpu_transcoder].reg))
		cpu_transcoder = TRANSCODER_A;

	return regs[cpu_transcoder];
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
							cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other hw tracking issues now that we
	 * can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG, mask);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

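/* Clear the enable bit of the active PSR1/PSR2 control register. */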
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (INTEL_GEN(dev_priv) >= 9)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
	} else {
		val = I915_READ(EDP_PSR_CTL);
		WARN_ON(!(val & EDP_PSR_ENABLE));
		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	}
	dev_priv->psr.active = false;
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS;
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS;
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
				    2000))
		DRM_ERROR("Timed out waiting for PSR idle state\n");

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #0884: all
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the currently active
	 * pipe.
	 */
	I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update PSR states, disabling, enabling or switching PSR
 * version when executing fastsets. For full modesets, intel_psr_disable() and
 * intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */

	return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

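/*
 * Wait for the PSR status to idle, dropping psr.lock across the wait, then
 * re-take the lock and report whether PSR is still wanted.
 */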
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

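/*
 * Trigger a fastset on the first active CRTC with PSR so that a changed
 * debug mode (e.g. forcing PSR1) gets applied without a full modeset.
 */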
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;
		struct intel_crtc_state *intel_crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		intel_crtc_state = to_intel_crtc_state(crtc_state);

		if (crtc_state->active && intel_crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

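/*
 * Handle an AUX error reported by the PSR interrupt: disable PSR, mark the
 * sink as not reliable and wake the sink up again. Called from the work
 * queue with psr.lock held.
 */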
static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR state.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation makes the screen freeze the first
	 * time the PSR HW tries to activate, so keep PSR disabled to avoid
	 * any rendering problems.
	 */
	val = I915_READ(EDP_PSR_IIR);
	val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
	if (val) {
		DRM_DEBUG_KMS("PSR interruption error set\n");
		dev_priv->psr.sink_not_reliable = true;
	}

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect the VBT again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}

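/**
 * intel_psr_short_pulse - handle a short HPD pulse on a PSR-capable sink
 * @intel_dp: Intel DP
 *
 * Reads the sink's PSR status and error DPCD registers; on any reported
 * error PSR is disabled and the sink is flagged as not reliable.
 */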
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
exit:
	mutex_unlock(&psr->lock);
}

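/*
 * Report whether PSR is currently enabled for @intel_dp. Takes psr.lock,
 * so it must not be called with that lock already held.
 */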
bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}