[linux.git] drivers/gpu/drm/i915/intel_psr.c (blob at commit "drm/i915: switch intel_wait_for_register to uncore")
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 /**
25  * DOC: Panel Self Refresh (PSR/SRD)
26  *
27  * Since Haswell the display controller supports Panel Self-Refresh on
28  * display panels which have a remote frame buffer (RFB) implemented
29  * according to the PSR spec in eDP 1.3. PSR allows the display to go to
30  * lower standby states when the system is idle but the display is on, as
31  * it completely eliminates display refresh requests to DDR memory as long
32  * as the frame buffer for that display is unchanged.
33  *
34  * Panel Self Refresh must be supported by both Hardware (source) and
35  * Panel (sink).
36  *
37  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
38  * to power down the link and memory controller. For DSI panels the same idea
39  * is called "manual mode".
40  *
41  * The implementation uses the hardware-based PSR support which automatically
42  * enters/exits self-refresh mode. The hardware takes care of sending the
43  * required DP aux message and could even retrain the link (that part isn't
44  * enabled yet though). The hardware also keeps track of any frontbuffer
45  * changes to know when to exit self-refresh mode again. Unfortunately that
46  * part doesn't work too well, which is why the i915 PSR support uses
47  * software frontbuffer tracking to make sure it doesn't miss a screen
48  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
49  * get called by the frontbuffer tracking code. Note that because of locking
50  * issues the self-refresh re-enable code is done from a work queue, which
51  * must be correctly synchronized/cancelled when shutting down the pipe.
52  */
53
54 #include <drm/drm_atomic_helper.h>
55
56 #include "intel_drv.h"
57 #include "i915_drv.h"
58
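/*
 * Resolve whether PSR is globally allowed: the PSR debug override, when set,
 * takes precedence over the enable_psr module parameter.
 */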
59 static bool psr_global_enabled(u32 debug)
60 {
61         switch (debug & I915_PSR_DEBUG_MODE_MASK) {
62         case I915_PSR_DEBUG_DEFAULT:
63                 return i915_modparams.enable_psr;
64         case I915_PSR_DEBUG_DISABLE:
65                 return false;
66         default:
67                 return true;
68         }
69 }
70
71 static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
72                                const struct intel_crtc_state *crtc_state)
73 {
74         /* Cannot enable DSC and PSR2 simultaneously */
75         WARN_ON(crtc_state->dsc_params.compression_enable &&
76                 crtc_state->has_psr2);
77
78         switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
79         case I915_PSR_DEBUG_DISABLE:
80         case I915_PSR_DEBUG_FORCE_PSR1:
81                 return false;
82         default:
83                 return crtc_state->has_psr2;
84         }
85 }
86
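/*
 * Map a transcoder to its bit shift within the EDP_PSR_IMR/IIR registers,
 * falling back to the eDP transcoder for unknown values.
 */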
87 static int edp_psr_shift(enum transcoder cpu_transcoder)
88 {
89         switch (cpu_transcoder) {
90         case TRANSCODER_A:
91                 return EDP_PSR_TRANSCODER_A_SHIFT;
92         case TRANSCODER_B:
93                 return EDP_PSR_TRANSCODER_B_SHIFT;
94         case TRANSCODER_C:
95                 return EDP_PSR_TRANSCODER_C_SHIFT;
96         default:
97                 MISSING_CASE(cpu_transcoder);
98                 /* fallthrough */
99         case TRANSCODER_EDP:
100                 return EDP_PSR_TRANSCODER_EDP_SHIFT;
101         }
102 }
103
104 void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
105 {
106         u32 debug_mask, mask;
107         enum transcoder cpu_transcoder;
108         u32 transcoders = BIT(TRANSCODER_EDP);
109
110         if (INTEL_GEN(dev_priv) >= 8)
111                 transcoders |= BIT(TRANSCODER_A) |
112                                BIT(TRANSCODER_B) |
113                                BIT(TRANSCODER_C);
114
115         debug_mask = 0;
116         mask = 0;
117         for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
118                 int shift = edp_psr_shift(cpu_transcoder);
119
120                 mask |= EDP_PSR_ERROR(shift);
121                 debug_mask |= EDP_PSR_POST_EXIT(shift) |
122                               EDP_PSR_PRE_ENTRY(shift);
123         }
124
125         if (debug & I915_PSR_DEBUG_IRQ)
126                 mask |= debug_mask;
127
128         I915_WRITE(EDP_PSR_IMR, ~mask);
129 }
130
131 static void psr_event_print(u32 val, bool psr2_enabled)
132 {
133         DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
134         if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
135                 DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
136         if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
137                 DRM_DEBUG_KMS("\tPSR2 disabled\n");
138         if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
139                 DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
140         if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
141                 DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
142         if (val & PSR_EVENT_GRAPHICS_RESET)
143                 DRM_DEBUG_KMS("\tGraphics reset\n");
144         if (val & PSR_EVENT_PCH_INTERRUPT)
145                 DRM_DEBUG_KMS("\tPCH interrupt\n");
146         if (val & PSR_EVENT_MEMORY_UP)
147                 DRM_DEBUG_KMS("\tMemory up\n");
148         if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
149                 DRM_DEBUG_KMS("\tFront buffer modification\n");
150         if (val & PSR_EVENT_WD_TIMER_EXPIRE)
151                 DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
152         if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
153                 DRM_DEBUG_KMS("\tPIPE registers updated\n");
154         if (val & PSR_EVENT_REGISTER_UPDATE)
155                 DRM_DEBUG_KMS("\tRegister updated\n");
156         if (val & PSR_EVENT_HDCP_ENABLE)
157                 DRM_DEBUG_KMS("\tHDCP enabled\n");
158         if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
159                 DRM_DEBUG_KMS("\tKVMR session enabled\n");
160         if (val & PSR_EVENT_VBI_ENABLE)
161                 DRM_DEBUG_KMS("\tVBI enabled\n");
162         if (val & PSR_EVENT_LPSP_MODE_EXIT)
163                 DRM_DEBUG_KMS("\tLPSP mode exited\n");
164         if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
165                 DRM_DEBUG_KMS("\tPSR disabled\n");
166 }
167
168 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
169 {
170         u32 transcoders = BIT(TRANSCODER_EDP);
171         enum transcoder cpu_transcoder;
172         ktime_t time_ns =  ktime_get();
173         u32 mask = 0;
174
175         if (INTEL_GEN(dev_priv) >= 8)
176                 transcoders |= BIT(TRANSCODER_A) |
177                                BIT(TRANSCODER_B) |
178                                BIT(TRANSCODER_C);
179
180         for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
181                 int shift = edp_psr_shift(cpu_transcoder);
182
183                 if (psr_iir & EDP_PSR_ERROR(shift)) {
184                         DRM_WARN("[transcoder %s] PSR aux error\n",
185                                  transcoder_name(cpu_transcoder));
186
187                         dev_priv->psr.irq_aux_error = true;
188
189                         /*
190                          * If this interrupt is not masked it will keep
191                          * firing so fast that it prevents the scheduled
192                          * work from running.
193                          * Also, after a PSR error we don't want to arm PSR
194                          * again, so there is no need to unmask the interrupt
195                          * or clear irq_aux_error.
196                          */
197                         mask |= EDP_PSR_ERROR(shift);
198                 }
199
200                 if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
201                         dev_priv->psr.last_entry_attempt = time_ns;
202                         DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
203                                       transcoder_name(cpu_transcoder));
204                 }
205
206                 if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
207                         dev_priv->psr.last_exit = time_ns;
208                         DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
209                                       transcoder_name(cpu_transcoder));
210
211                         if (INTEL_GEN(dev_priv) >= 9) {
212                                 u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
213                                 bool psr2_enabled = dev_priv->psr.psr2_enabled;
214
215                                 I915_WRITE(PSR_EVENT(cpu_transcoder), val);
216                                 psr_event_print(val, psr2_enabled);
217                         }
218                 }
219         }
220
221         if (mask) {
222                 mask |= I915_READ(EDP_PSR_IMR);
223                 I915_WRITE(EDP_PSR_IMR, mask);
224
225                 schedule_work(&dev_priv->psr.work);
226         }
227 }
228
229 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
230 {
231         u8 dprx = 0;
232
233         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
234                               &dprx) != 1)
235                 return false;
236         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
237 }
238
239 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
240 {
241         u8 alpm_caps = 0;
242
243         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
244                               &alpm_caps) != 1)
245                 return false;
246         return alpm_caps & DP_ALPM_CAP;
247 }
248
249 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
250 {
251         u8 val = 8; /* assume the worst if we can't read the value */
252
253         if (drm_dp_dpcd_readb(&intel_dp->aux,
254                               DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
255                 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
256         else
257                 DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
258         return val;
259 }
260
261 static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
262 {
263         u16 val;
264         ssize_t r;
265
266         /*
267          * Return the default X granularity if the sink does not require a
268          * specific granularity or if the DPCD read fails.
269          */
270         if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
271                 return 4;
272
273         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
274         if (r != 2)
275                 DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
276
277         /*
278          * Spec says that if the value read is 0 the default granularity should
279          * be used instead.
280          */
281         if (r != 2 || val == 0)
282                 val = 4;
283
284         return val;
285 }
286
287 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
288 {
289         struct drm_i915_private *dev_priv =
290                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
291
292         drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
293                          sizeof(intel_dp->psr_dpcd));
294
295         if (!intel_dp->psr_dpcd[0])
296                 return;
297         DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
298                       intel_dp->psr_dpcd[0]);
299
300         if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
301                 DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
302                 return;
303         }
304
305         if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
306                 DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
307                 return;
308         }
309
310         dev_priv->psr.sink_support = true;
311         dev_priv->psr.sink_sync_latency =
312                 intel_dp_get_sink_sync_latency(intel_dp);
313
314         WARN_ON(dev_priv->psr.dp);
315         dev_priv->psr.dp = intel_dp;
316
317         if (INTEL_GEN(dev_priv) >= 9 &&
318             (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
319                 bool y_req = intel_dp->psr_dpcd[1] &
320                              DP_PSR2_SU_Y_COORDINATE_REQUIRED;
321                 bool alpm = intel_dp_get_alpm_status(intel_dp);
322
323                 /*
324                  * All panels that support PSR version 03h (PSR2 +
325                  * Y-coordinate) can handle Y-coordinates in the VSC, but we
326                  * are only sure that they are going to be used when required
327                  * by the panel. This way the panel is able to do selective
328                  * updates without an AUX frame sync.
329                  *
330                  * To support panels with PSR version 02h, or version 03h
331                  * panels that do not require the Y-coordinate, we would need
332                  * to enable GTC first.
333                  */
334                 dev_priv->psr.sink_psr2_support = y_req && alpm;
335                 DRM_DEBUG_KMS("PSR2 %ssupported\n",
336                               dev_priv->psr.sink_psr2_support ? "" : "not ");
337
338                 if (dev_priv->psr.sink_psr2_support) {
339                         dev_priv->psr.colorimetry_support =
340                                 intel_dp_get_colorimetry_status(intel_dp);
341                         dev_priv->psr.su_x_granularity =
342                                 intel_dp_get_su_x_granularity(intel_dp);
343                 }
344         }
345 }
346
347 static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
348                                 const struct intel_crtc_state *crtc_state)
349 {
350         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
351         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
352         struct edp_vsc_psr psr_vsc;
353
354         if (dev_priv->psr.psr2_enabled) {
355                 /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
356                 memset(&psr_vsc, 0, sizeof(psr_vsc));
357                 psr_vsc.sdp_header.HB0 = 0;
358                 psr_vsc.sdp_header.HB1 = 0x7;
359                 if (dev_priv->psr.colorimetry_support) {
360                         psr_vsc.sdp_header.HB2 = 0x5;
361                         psr_vsc.sdp_header.HB3 = 0x13;
362                 } else {
363                         psr_vsc.sdp_header.HB2 = 0x4;
364                         psr_vsc.sdp_header.HB3 = 0xe;
365                 }
366         } else {
367                 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
368                 memset(&psr_vsc, 0, sizeof(psr_vsc));
369                 psr_vsc.sdp_header.HB0 = 0;
370                 psr_vsc.sdp_header.HB1 = 0x7;
371                 psr_vsc.sdp_header.HB2 = 0x2;
372                 psr_vsc.sdp_header.HB3 = 0x8;
373         }
374
375         intel_dig_port->write_infoframe(&intel_dig_port->base,
376                                         crtc_state,
377                                         DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
378 }
379
380 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
381 {
382         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
383         u32 aux_clock_divider, aux_ctl;
384         int i;
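        /*
         * Pre-programmed AUX message the PSR hardware sends to wake the sink:
         * a native AUX write of DP_SET_POWER_D0 to the sink's DP_SET_POWER
         * register (the length field below is encoded as length minus one).
         */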
385         static const u8 aux_msg[] = {
386                 [0] = DP_AUX_NATIVE_WRITE << 4,
387                 [1] = DP_SET_POWER >> 8,
388                 [2] = DP_SET_POWER & 0xff,
389                 [3] = 1 - 1,
390                 [4] = DP_SET_POWER_D0,
391         };
392         u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
393                            EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
394                            EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
395                            EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
396
397         BUILD_BUG_ON(sizeof(aux_msg) > 20);
398         for (i = 0; i < sizeof(aux_msg); i += 4)
399                 I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
400                            intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
401
402         aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
403
404         /* Start with bits set for DDI_AUX_CTL register */
405         aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
406                                              aux_clock_divider);
407
408         /* Select only valid bits for SRD_AUX_CTL */
409         aux_ctl &= psr_aux_mask;
410         I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
411 }
412
413 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
414 {
415         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
416         u8 dpcd_val = DP_PSR_ENABLE;
417
418         /* Enable ALPM at sink for psr2 */
419         if (dev_priv->psr.psr2_enabled) {
420                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
421                                    DP_ALPM_ENABLE);
422                 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
423         } else {
424                 if (dev_priv->psr.link_standby)
425                         dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
426
427                 if (INTEL_GEN(dev_priv) >= 8)
428                         dpcd_val |= DP_PSR_CRC_VERIFICATION;
429         }
430
431         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
432
433         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
434 }
435
436 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
437 {
438         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
439         u32 val = 0;
440
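        /*
         * Build the link training pattern timing fields for EDP_PSR_CTL: TP4
         * is forced to 0us on gen11+, and the VBT TP1 and TP2/TP3 wakeup
         * times are rounded up to the nearest encoding the HW supports.
         */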
441         if (INTEL_GEN(dev_priv) >= 11)
442                 val |= EDP_PSR_TP4_TIME_0US;
443
444         if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
445                 val |= EDP_PSR_TP1_TIME_0us;
446         else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
447                 val |= EDP_PSR_TP1_TIME_100us;
448         else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
449                 val |= EDP_PSR_TP1_TIME_500us;
450         else
451                 val |= EDP_PSR_TP1_TIME_2500us;
452
453         if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
454                 val |= EDP_PSR_TP2_TP3_TIME_0us;
455         else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
456                 val |= EDP_PSR_TP2_TP3_TIME_100us;
457         else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
458                 val |= EDP_PSR_TP2_TP3_TIME_500us;
459         else
460                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
461
462         if (intel_dp_source_supports_hbr2(intel_dp) &&
463             drm_dp_tps3_supported(intel_dp->dpcd))
464                 val |= EDP_PSR_TP1_TP3_SEL;
465         else
466                 val |= EDP_PSR_TP1_TP2_SEL;
467
468         return val;
469 }
470
471 static void hsw_activate_psr1(struct intel_dp *intel_dp)
472 {
473         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
474         u32 max_sleep_time = 0x1f;
475         u32 val = EDP_PSR_ENABLE;
476
477         /* Let's use 6 as the minimum to cover all known cases including the
478          * off-by-one issue that HW has in some cases.
479          */
480         int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
481
482         /* A sink_sync_latency of 8 means the source has to wait for more
483          * than 8 frames; we'll go with 9 frames for now.
484          */
485         idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
486         val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
487
488         val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
489         if (IS_HASWELL(dev_priv))
490                 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
491
492         if (dev_priv->psr.link_standby)
493                 val |= EDP_PSR_LINK_STANDBY;
494
495         val |= intel_psr1_get_tp_time(intel_dp);
496
497         if (INTEL_GEN(dev_priv) >= 8)
498                 val |= EDP_PSR_CRC_ENABLE;
499
500         val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
501         I915_WRITE(EDP_PSR_CTL, val);
502 }
503
504 static void hsw_activate_psr2(struct intel_dp *intel_dp)
505 {
506         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
507         u32 val;
508
509         /* Let's use 6 as the minimum to cover all known cases including the
510          * off-by-one issue that HW has in some cases.
511          */
512         int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
513
514         idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
515         val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
516
517         val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
518         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
519                 val |= EDP_Y_COORDINATE_ENABLE;
520
521         val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
522
523         if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
524             dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
525                 val |= EDP_PSR2_TP2_TIME_50us;
526         else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
527                 val |= EDP_PSR2_TP2_TIME_100us;
528         else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
529                 val |= EDP_PSR2_TP2_TIME_500us;
530         else
531                 val |= EDP_PSR2_TP2_TIME_2500us;
532
533         /*
534          * FIXME: There is probably an issue in the DMC firmware (at least
535          * icl_dmc_ver1_07.bin and kbl_dmc_ver1_04.bin) that causes PSR2 SU to
536          * fail after exiting DC6 if EDP_PSR_TP1_TP3_SEL is kept in PSR_CTL,
537          * so let's work around it by clearing PSR_CTL before enabling PSR2.
538          */
539         I915_WRITE(EDP_PSR_CTL, 0);
540
541         I915_WRITE(EDP_PSR2_CTL, val);
542 }
543
544 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
545                                     struct intel_crtc_state *crtc_state)
546 {
547         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
548         int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
549         int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
550         int psr_max_h = 0, psr_max_v = 0;
551
552         if (!dev_priv->psr.sink_psr2_support)
553                 return false;
554
555         /*
556          * DSC and PSR2 cannot be enabled simultaneously. If a requested
557          * resolution requires DSC to be enabled, priority is given to DSC
558          * over PSR2.
559          */
560         if (crtc_state->dsc_params.compression_enable) {
561                 DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
562                 return false;
563         }
564
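        /*
         * PSR2 selective update is only supported by the HW up to a platform
         * dependent maximum resolution; larger modes only get PSR1.
         */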
565         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
566                 psr_max_h = 4096;
567                 psr_max_v = 2304;
568         } else if (IS_GEN(dev_priv, 9)) {
569                 psr_max_h = 3640;
570                 psr_max_v = 2304;
571         }
572
573         if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
574                 DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
575                               crtc_hdisplay, crtc_vdisplay,
576                               psr_max_h, psr_max_v);
577                 return false;
578         }
579
580         /*
581          * HW sends SU blocks of size four scan lines, which means the starting
582          * X coordinate and Y granularity requirements will always be met. We
583          * only need to validate that the SU block width is a multiple of the
584          * X granularity.
585          */
586         if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
587                 DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
588                               crtc_hdisplay, dev_priv->psr.su_x_granularity);
589                 return false;
590         }
591
592         if (crtc_state->crc_enabled) {
593                 DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
594                 return false;
595         }
596
597         return true;
598 }
599
600 void intel_psr_compute_config(struct intel_dp *intel_dp,
601                               struct intel_crtc_state *crtc_state)
602 {
603         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
604         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
605         const struct drm_display_mode *adjusted_mode =
606                 &crtc_state->base.adjusted_mode;
607         int psr_setup_time;
608
609         if (!CAN_PSR(dev_priv))
610                 return;
611
612         if (intel_dp != dev_priv->psr.dp)
613                 return;
614
615         /*
616          * The HSW spec explicitly says PSR is tied to port A.
617          * BDW+ platforms with a DDI implementation of PSR have per-transcoder
618          * PSR registers, and we only implement the transcoder EDP ones. Since
619          * by display design transcoder EDP is tied to port A, we can safely
620          * bail out when the port is not A.
621          */
622         if (dig_port->base.port != PORT_A) {
623                 DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
624                 return;
625         }
626
627         if (dev_priv->psr.sink_not_reliable) {
628                 DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
629                 return;
630         }
631
632         if (IS_HASWELL(dev_priv) &&
633             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
634                 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
635                 return;
636         }
637
638         psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
639         if (psr_setup_time < 0) {
640                 DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
641                               intel_dp->psr_dpcd[1]);
642                 return;
643         }
644
645         if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
646             adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
647                 DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
648                               psr_setup_time);
649                 return;
650         }
651
652         crtc_state->has_psr = true;
653         crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
654 }
655
656 static void intel_psr_activate(struct intel_dp *intel_dp)
657 {
658         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
659
660         if (INTEL_GEN(dev_priv) >= 9)
661                 WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
662         WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
663         WARN_ON(dev_priv->psr.active);
664         lockdep_assert_held(&dev_priv->psr.lock);
665
666         /* PSR1 and PSR2 are mutually exclusive. */
667         if (dev_priv->psr.psr2_enabled)
668                 hsw_activate_psr2(intel_dp);
669         else
670                 hsw_activate_psr1(intel_dp);
671
672         dev_priv->psr.active = true;
673 }
674
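/*
 * Return the CHICKEN_TRANS register for the given transcoder, falling back
 * to transcoder A if the transcoder has no mapping.
 */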
675 static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
676                                          enum transcoder cpu_transcoder)
677 {
678         static const i915_reg_t regs[] = {
679                 [TRANSCODER_A] = CHICKEN_TRANS_A,
680                 [TRANSCODER_B] = CHICKEN_TRANS_B,
681                 [TRANSCODER_C] = CHICKEN_TRANS_C,
682                 [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
683         };
684
685         WARN_ON(INTEL_GEN(dev_priv) < 9);
686
687         if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
688                     !regs[cpu_transcoder].reg))
689                 cpu_transcoder = TRANSCODER_A;
690
691         return regs[cpu_transcoder];
692 }
693
694 static void intel_psr_enable_source(struct intel_dp *intel_dp,
695                                     const struct intel_crtc_state *crtc_state)
696 {
697         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
698         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
699         u32 mask;
700
701         /* Only HSW and BDW have PSR AUX registers that need to be set up.
702          * SKL+ uses hardcoded values for PSR AUX transactions.
703          */
704         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
705                 hsw_psr_setup_aux(intel_dp);
706
707         if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
708                                            !IS_GEMINILAKE(dev_priv))) {
709                 i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
710                                                         cpu_transcoder);
711                 u32 chicken = I915_READ(reg);
712
713                 chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
714                            PSR2_ADD_VERTICAL_LINE_COUNT;
715                 I915_WRITE(reg, chicken);
716         }
717
718         /*
719          * Per spec: avoid continuous PSR exits by masking MEMUP and HPD.
720          * Also mask LPSP to avoid a dependency on other drivers that might
721          * block runtime_pm, besides preventing other HW tracking issues, now
722          * that we can rely on frontbuffer tracking.
723          */
724         mask = EDP_PSR_DEBUG_MASK_MEMUP |
725                EDP_PSR_DEBUG_MASK_HPD |
726                EDP_PSR_DEBUG_MASK_LPSP |
727                EDP_PSR_DEBUG_MASK_MAX_SLEEP;
728
729         if (INTEL_GEN(dev_priv) < 11)
730                 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
731
732         I915_WRITE(EDP_PSR_DEBUG, mask);
733 }
734
735 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
736                                     const struct intel_crtc_state *crtc_state)
737 {
738         struct intel_dp *intel_dp = dev_priv->psr.dp;
739
740         WARN_ON(dev_priv->psr.enabled);
741
742         dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
743         dev_priv->psr.busy_frontbuffer_bits = 0;
744         dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
745
746         DRM_DEBUG_KMS("Enabling PSR%s\n",
747                       dev_priv->psr.psr2_enabled ? "2" : "1");
748         intel_psr_setup_vsc(intel_dp, crtc_state);
749         intel_psr_enable_sink(intel_dp);
750         intel_psr_enable_source(intel_dp, crtc_state);
751         dev_priv->psr.enabled = true;
752
753         intel_psr_activate(intel_dp);
754 }
755
756 /**
757  * intel_psr_enable - Enable PSR
758  * @intel_dp: Intel DP
759  * @crtc_state: new CRTC state
760  *
761  * This function can only be called after the pipe is fully trained and enabled.
762  */
763 void intel_psr_enable(struct intel_dp *intel_dp,
764                       const struct intel_crtc_state *crtc_state)
765 {
766         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
767
768         if (!crtc_state->has_psr)
769                 return;
770
771         if (WARN_ON(!CAN_PSR(dev_priv)))
772                 return;
773
774         WARN_ON(dev_priv->drrs.dp);
775
776         mutex_lock(&dev_priv->psr.lock);
777
778         if (!psr_global_enabled(dev_priv->psr.debug)) {
779                 DRM_DEBUG_KMS("PSR disabled by flag\n");
780                 goto unlock;
781         }
782
783         intel_psr_enable_locked(dev_priv, crtc_state);
784
785 unlock:
786         mutex_unlock(&dev_priv->psr.lock);
787 }
788
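/*
 * Deactivate PSR on the source side by clearing the enable bit of whichever
 * PSR version (PSR1 or PSR2) is currently active.
 */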
789 static void intel_psr_exit(struct drm_i915_private *dev_priv)
790 {
791         u32 val;
792
793         if (!dev_priv->psr.active) {
794                 if (INTEL_GEN(dev_priv) >= 9)
795                         WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
796                 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
797                 return;
798         }
799
800         if (dev_priv->psr.psr2_enabled) {
801                 val = I915_READ(EDP_PSR2_CTL);
802                 WARN_ON(!(val & EDP_PSR2_ENABLE));
803                 I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
804         } else {
805                 val = I915_READ(EDP_PSR_CTL);
806                 WARN_ON(!(val & EDP_PSR_ENABLE));
807                 I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
808         }
809         dev_priv->psr.active = false;
810 }
811
812 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
813 {
814         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
815         i915_reg_t psr_status;
816         u32 psr_status_mask;
817
818         lockdep_assert_held(&dev_priv->psr.lock);
819
820         if (!dev_priv->psr.enabled)
821                 return;
822
823         DRM_DEBUG_KMS("Disabling PSR%s\n",
824                       dev_priv->psr.psr2_enabled ? "2" : "1");
825
826         intel_psr_exit(dev_priv);
827
828         if (dev_priv->psr.psr2_enabled) {
829                 psr_status = EDP_PSR2_STATUS;
830                 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
831         } else {
832                 psr_status = EDP_PSR_STATUS;
833                 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
834         }
835
836         /* Wait till PSR is idle */
837         if (intel_wait_for_register(&dev_priv->uncore,
838                                     psr_status, psr_status_mask, 0, 2000))
839                 DRM_ERROR("Timed out waiting PSR idle state\n");
840
841         /* Disable PSR on Sink */
842         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
843
844         dev_priv->psr.enabled = false;
845 }
846
847 /**
848  * intel_psr_disable - Disable PSR
849  * @intel_dp: Intel DP
850  * @old_crtc_state: old CRTC state
851  *
852  * This function needs to be called before disabling the pipe.
853  */
854 void intel_psr_disable(struct intel_dp *intel_dp,
855                        const struct intel_crtc_state *old_crtc_state)
856 {
857         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
858
859         if (!old_crtc_state->has_psr)
860                 return;
861
862         if (WARN_ON(!CAN_PSR(dev_priv)))
863                 return;
864
865         mutex_lock(&dev_priv->psr.lock);
866
867         intel_psr_disable_locked(intel_dp);
868
869         mutex_unlock(&dev_priv->psr.lock);
870         cancel_work_sync(&dev_priv->psr.work);
871 }
872
873 static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
874 {
875         /*
876          * Display WA #0884: all
877          * This WA, documented for bxt, can be safely applied
878          * broadly, so we use it to force HW tracking to exit PSR
879          * instead of disabling and re-enabling it.
880          * The workaround tells us to write 0 to CUR_SURFLIVE_A,
881          * but it makes more sense to write to the currently
882          * active pipe.
883          */
884         I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
885 }
886
887 /**
888  * intel_psr_update - Update PSR state
889  * @intel_dp: Intel DP
890  * @crtc_state: new CRTC state
891  *
892  * This function updates the PSR state, disabling, enabling or switching the
893  * PSR version when executing fastsets. For full modesets, intel_psr_disable() and
894  * intel_psr_enable() should be called instead.
895  */
896 void intel_psr_update(struct intel_dp *intel_dp,
897                       const struct intel_crtc_state *crtc_state)
898 {
899         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
900         struct i915_psr *psr = &dev_priv->psr;
901         bool enable, psr2_enable;
902
903         if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
904                 return;
905
906         mutex_lock(&dev_priv->psr.lock);
907
908         enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
909         psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
910
911         if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
912                 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
913                 if (crtc_state->crc_enabled && psr->enabled)
914                         psr_force_hw_tracking_exit(dev_priv);
915
916                 goto unlock;
917         }
918
919         if (psr->enabled)
920                 intel_psr_disable_locked(intel_dp);
921
922         if (enable)
923                 intel_psr_enable_locked(dev_priv, crtc_state);
924
925 unlock:
926         mutex_unlock(&dev_priv->psr.lock);
927 }
928
929 /**
930  * intel_psr_wait_for_idle - wait for PSR1 to idle
931  * @new_crtc_state: new CRTC state
932  * @out_value: PSR status in case of failure
933  *
934  * This function is expected to be called from pipe_update_start() where it is
935  * not expected to race with PSR enable or disable.
936  *
937  * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
938  */
939 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
940                             u32 *out_value)
941 {
942         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
943         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
944
945         if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
946                 return 0;
947
948         /* FIXME: Update this for PSR2 if we need to wait for idle */
949         if (READ_ONCE(dev_priv->psr.psr2_enabled))
950                 return 0;
951
952         /*
953          * From bspec: Panel Self Refresh (BDW+)
954          * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
955          * exit training time + 1.5 ms of aux channel handshake. 50 ms is
956          * defensive enough to cover everything.
957          */
958
959         return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
960                                          EDP_PSR_STATUS_STATE_MASK,
961                                          EDP_PSR_STATUS_STATE_IDLE, 2, 50,
962                                          out_value);
963 }
964
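/*
 * Wait for the PSR state machine to go idle with psr.lock temporarily
 * dropped, then re-take the lock and report whether PSR is still enabled
 * and safe to re-activate.
 */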
965 static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
966 {
967         i915_reg_t reg;
968         u32 mask;
969         int err;
970
971         if (!dev_priv->psr.enabled)
972                 return false;
973
974         if (dev_priv->psr.psr2_enabled) {
975                 reg = EDP_PSR2_STATUS;
976                 mask = EDP_PSR2_STATUS_STATE_MASK;
977         } else {
978                 reg = EDP_PSR_STATUS;
979                 mask = EDP_PSR_STATUS_STATE_MASK;
980         }
981
982         mutex_unlock(&dev_priv->psr.lock);
983
984         err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
985         if (err)
986                 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
987
988         /* After the unlocked wait, verify that PSR is still wanted! */
989         mutex_lock(&dev_priv->psr.lock);
990         return err == 0 && dev_priv->psr.enabled;
991 }
992
993 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
994 {
995         struct drm_device *dev = &dev_priv->drm;
996         struct drm_modeset_acquire_ctx ctx;
997         struct drm_atomic_state *state;
998         struct drm_crtc *crtc;
999         int err;
1000
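        /*
         * Build a transient atomic state that marks the first active CRTC
         * with PSR as mode changed; committing it forces the PSR
         * configuration to be re-computed with the new debug value.
         */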
1001         state = drm_atomic_state_alloc(dev);
1002         if (!state)
1003                 return -ENOMEM;
1004
1005         drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1006         state->acquire_ctx = &ctx;
1007
1008 retry:
1009         drm_for_each_crtc(crtc, dev) {
1010                 struct drm_crtc_state *crtc_state;
1011                 struct intel_crtc_state *intel_crtc_state;
1012
1013                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1014                 if (IS_ERR(crtc_state)) {
1015                         err = PTR_ERR(crtc_state);
1016                         goto error;
1017                 }
1018
1019                 intel_crtc_state = to_intel_crtc_state(crtc_state);
1020
1021                 if (crtc_state->active && intel_crtc_state->has_psr) {
1022                         /* Mark mode as changed to trigger a pipe->update() */
1023                         crtc_state->mode_changed = true;
1024                         break;
1025                 }
1026         }
1027
1028         err = drm_atomic_commit(state);
1029
1030 error:
1031         if (err == -EDEADLK) {
1032                 drm_atomic_state_clear(state);
1033                 err = drm_modeset_backoff(&ctx);
1034                 if (!err)
1035                         goto retry;
1036         }
1037
1038         drm_modeset_drop_locks(&ctx);
1039         drm_modeset_acquire_fini(&ctx);
1040         drm_atomic_state_put(state);
1041
1042         return err;
1043 }
1044
1045 int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
1046 {
1047         const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
1048         u32 old_mode;
1049         int ret;
1050
1051         if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
1052             mode > I915_PSR_DEBUG_FORCE_PSR1) {
1053                 DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
1054                 return -EINVAL;
1055         }
1056
1057         ret = mutex_lock_interruptible(&dev_priv->psr.lock);
1058         if (ret)
1059                 return ret;
1060
1061         old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
1062         dev_priv->psr.debug = val;
1063         intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
1064
1065         mutex_unlock(&dev_priv->psr.lock);
1066
1067         if (old_mode != mode)
1068                 ret = intel_psr_fastset_force(dev_priv);
1069
1070         return ret;
1071 }
1072
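/*
 * Handle a PSR AUX error reported from the interrupt handler: keep PSR
 * disabled, mark the sink as not reliable, and wake the sink back to D0.
 */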
1073 static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
1074 {
1075         struct i915_psr *psr = &dev_priv->psr;
1076
1077         intel_psr_disable_locked(psr->dp);
1078         psr->sink_not_reliable = true;
1079         /* let's make sure the sink is awake */
1080         drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
1081 }
1082
1083 static void intel_psr_work(struct work_struct *work)
1084 {
1085         struct drm_i915_private *dev_priv =
1086                 container_of(work, typeof(*dev_priv), psr.work);
1087
1088         mutex_lock(&dev_priv->psr.lock);
1089
1090         if (!dev_priv->psr.enabled)
1091                 goto unlock;
1092
1093         if (READ_ONCE(dev_priv->psr.irq_aux_error))
1094                 intel_psr_handle_irq(dev_priv);
1095
1096         /*
1097          * We have to make sure PSR is ready for re-enable,
1098          * otherwise it stays disabled until the next full enable/disable cycle.
1099          * PSR might take some time to get fully disabled
1100          * and be ready for re-enable.
1101          */
1102         if (!__psr_wait_for_idle_locked(dev_priv))
1103                 goto unlock;
1104
1105         /*
1106          * The delayed work can race with an invalidate hence we need to
1107          * recheck. Since psr_flush first clears this and then reschedules we
1108          * won't ever miss a flush when bailing out here.
1109          */
1110         if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
1111                 goto unlock;
1112
1113         intel_psr_activate(dev_priv->psr.dp);
1114 unlock:
1115         mutex_unlock(&dev_priv->psr.lock);
1116 }
1117
1118 /**
1119  * intel_psr_invalidate - Invalidate PSR
1120  * @dev_priv: i915 device
1121  * @frontbuffer_bits: frontbuffer plane tracking bits
1122  * @origin: which operation caused the invalidate
1123  *
1124  * Since the hardware frontbuffer tracking has gaps we need to integrate
1125  * with the software frontbuffer tracking. This function gets called every
1126  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
1127  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
1128  *
1129  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1130  */
1131 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
1132                           unsigned frontbuffer_bits, enum fb_op_origin origin)
1133 {
1134         if (!CAN_PSR(dev_priv))
1135                 return;
1136
1137         if (origin == ORIGIN_FLIP)
1138                 return;
1139
1140         mutex_lock(&dev_priv->psr.lock);
1141         if (!dev_priv->psr.enabled) {
1142                 mutex_unlock(&dev_priv->psr.lock);
1143                 return;
1144         }
1145
1146         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
1147         dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
1148
1149         if (frontbuffer_bits)
1150                 intel_psr_exit(dev_priv);
1151
1152         mutex_unlock(&dev_priv->psr.lock);
1153 }
1154
1155 /**
1156  * intel_psr_flush - Flush PSR
1157  * @dev_priv: i915 device
1158  * @frontbuffer_bits: frontbuffer plane tracking bits
1159  * @origin: which operation caused the flush
1160  *
1161  * Since the hardware frontbuffer tracking has gaps we need to integrate
1162  * with the software frontbuffer tracking. This function gets called every
1163  * time frontbuffer rendering has completed and flushed out to memory. PSR
1164  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
1165  *
1166  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1167  */
1168 void intel_psr_flush(struct drm_i915_private *dev_priv,
1169                      unsigned frontbuffer_bits, enum fb_op_origin origin)
1170 {
1171         if (!CAN_PSR(dev_priv))
1172                 return;
1173
1174         if (origin == ORIGIN_FLIP)
1175                 return;
1176
1177         mutex_lock(&dev_priv->psr.lock);
1178         if (!dev_priv->psr.enabled) {
1179                 mutex_unlock(&dev_priv->psr.lock);
1180                 return;
1181         }
1182
1183         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
1184         dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
1185
1186         /* By definition flush = invalidate + flush */
1187         if (frontbuffer_bits)
1188                 psr_force_hw_tracking_exit(dev_priv);
1189
1190         if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
1191                 schedule_work(&dev_priv->psr.work);
1192         mutex_unlock(&dev_priv->psr.lock);
1193 }
1194
1195 /**
1196  * intel_psr_init - Init basic PSR work and mutex.
1197  * @dev_priv: i915 device private
1198  *
1199  * This function is called only once at driver load to initialize basic
1200  * PSR stuff.
1201  */
1202 void intel_psr_init(struct drm_i915_private *dev_priv)
1203 {
1204         u32 val;
1205
1206         if (!HAS_PSR(dev_priv))
1207                 return;
1208
1209         dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
1210                 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
1211
1212         if (!dev_priv->psr.sink_support)
1213                 return;
1214
1215         if (i915_modparams.enable_psr == -1)
1216                 if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
1217                         i915_modparams.enable_psr = 0;
1218
1219         /*
1220          * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1221          * will still keep the error set even after the reset done in the
1222          * irq_preinstall and irq_uninstall hooks.
1223          * Enabling PSR in this situation causes the screen to freeze the
1224          * first time the PSR HW tries to activate, so let's keep PSR disabled
1225          * to avoid any rendering problems.
1226          */
1227         val = I915_READ(EDP_PSR_IIR);
1228         val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
1229         if (val) {
1230                 DRM_DEBUG_KMS("PSR interruption error set\n");
1231                 dev_priv->psr.sink_not_reliable = true;
1232                 return;
1233         }
1234
1235         /* Set link_standby vs. link_off defaults */
1236         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1237                 /* HSW and BDW require workarounds that we don't implement. */
1238                 dev_priv->psr.link_standby = false;
1239         else
1240                 /* For new platforms let's respect VBT back again */
1241                 dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
1242
1243         INIT_WORK(&dev_priv->psr.work, intel_psr_work);
1244         mutex_init(&dev_priv->psr.lock);
1245 }
1246
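/*
 * Handle a short HPD pulse from the sink: read the PSR status and error
 * registers over DPCD and disable PSR if the sink reports an internal
 * error or any of the tracked error bits.
 */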
1247 void intel_psr_short_pulse(struct intel_dp *intel_dp)
1248 {
1249         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1250         struct i915_psr *psr = &dev_priv->psr;
1251         u8 val;
1252         const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
1253                           DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
1254                           DP_PSR_LINK_CRC_ERROR;
1255
1256         if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
1257                 return;
1258
1259         mutex_lock(&psr->lock);
1260
1261         if (!psr->enabled || psr->dp != intel_dp)
1262                 goto exit;
1263
1264         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
1265                 DRM_ERROR("PSR_STATUS dpcd read failed\n");
1266                 goto exit;
1267         }
1268
1269         if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
1270                 DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
1271                 intel_psr_disable_locked(intel_dp);
1272                 psr->sink_not_reliable = true;
1273         }
1274
1275         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
1276                 DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
1277                 goto exit;
1278         }
1279
1280         if (val & DP_PSR_RFB_STORAGE_ERROR)
1281                 DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
1282         if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
1283                 DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
1284         if (val & DP_PSR_LINK_CRC_ERROR)
1285                 DRM_ERROR("PSR Link CRC error, disabling PSR\n");
1286
1287         if (val & ~errors)
1288                 DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
1289                           val & ~errors);
1290         if (val & errors) {
1291                 intel_psr_disable_locked(intel_dp);
1292                 psr->sink_not_reliable = true;
1293         }
1294         /* clear status register */
1295         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
1296 exit:
1297         mutex_unlock(&psr->lock);
1298 }
1299
1300 bool intel_psr_enabled(struct intel_dp *intel_dp)
1301 {
1302         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1303         bool ret;
1304
1305         if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
1306                 return false;
1307
1308         mutex_lock(&dev_priv->psr.lock);
1309         ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
1310         mutex_unlock(&dev_priv->psr.lock);
1311
1312         return ret;
1313 }