/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER      61440
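/* i.e. 2 * 640 * 6 bytes == 7680 bytes == 61440 bits, so the limit above appears to be expressed in bits */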
#define DP_DSC_MIN_SUPPORTED_BPC                8
#define DP_DSC_MAX_SUPPORTED_BPC                10

/* DP DSC throughput values used for slice count calculations, in KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE                  2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0             340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1             400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR              976
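/* i.e. (100 - 2.4)/100 == 0.976, apparently stored as a fixed-point value scaled by 1000 */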

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
        int clock;
        struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming fractional division for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
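/*
 * Worked example (illustrative): for the 162000 entry above,
 * (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a,
 * which matches the .m2 value.
 */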

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_encoder *encoder,
                               const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
                                           const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
                                      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
        static const int dp_rates[] = {
                162000, 270000, 540000, 810000
        };
        int i, max_rate;

        max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

        for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
                if (dp_rates[i] > max_rate)
                        break;
                intel_dp->sink_rates[i] = dp_rates[i];
        }

        intel_dp->num_sink_rates = i;
}
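/*
 * Worked example (illustrative): a sink reporting DP_MAX_LINK_RATE of
 * 0x14 (HBR2) yields max_rate == 540000, so sink_rates becomes
 * { 162000, 270000, 540000 } and num_sink_rates == 3.
 */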

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
        int i;

        /* Limit results by potentially reduced max rate */
        for (i = 0; i < len; i++) {
                if (rates[len - i - 1] <= max_rate)
                        return len - i;
        }

        return 0;
}
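/*
 * Worked example (illustrative): for rates { 162000, 270000, 540000 }
 * and max_rate == 300000, the scan from the end skips 540000, stops at
 * 270000, and returns a length of 2.
 */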

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
                                          int max_rate)
{
        return intel_dp_rate_limit_len(intel_dp->common_rates,
                                       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
        return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        int source_max = intel_dig_port->max_lanes;
        int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
        int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

        return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
        /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
        return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
         * link rate that is generally expressed in Gbps. Since 8 bits of data
         * are transmitted every LS_Clk per lane, there is no need to account
         * for the channel encoding that is done in the PHY layer here.
         */

        return max_link_clock * max_lanes;
}
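/*
 * Worked example (illustrative): a 148500 kHz pixel clock at 24 bpp
 * needs intel_dp_link_required(148500, 24) == 445500, which fits within
 * intel_dp_max_data_rate(540000, 2) == 1080000 (2-lane HBR2).
 */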

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int max_dotclk = dev_priv->max_dotclk_freq;
        int ds_max_dotclk;

        int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

        if (type != DP_DS_PORT_TYPE_VGA)
                return max_dotclk;

        ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
                                                    intel_dp->downstream_ports);

        if (ds_max_dotclk != 0)
                max_dotclk = min(max_dotclk, ds_max_dotclk);

        return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum port port = dig_port->base.port;

        u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

        /* Low voltage SKUs are limited to max of 5.4G */
        if (voltage == VOLTAGE_INFO_0_85V)
                return 540000;

        /* For this SKU 8.1G is supported in all ports */
        if (IS_CNL_WITH_PORT_F(dev_priv))
                return 810000;

        /* For other SKUs, max rate on ports A and D is 5.4G */
        if (port == PORT_A || port == PORT_D)
                return 540000;

        return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

        if (intel_phy_is_combo(dev_priv, phy) &&
            !IS_ELKHARTLAKE(dev_priv) &&
            !intel_dp_is_edp(intel_dp))
                return 540000;

        return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
        /* The values must be in increasing order */
        static const int cnl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
        };
        static const int bxt_rates[] = {
                162000, 216000, 243000, 270000, 324000, 432000, 540000
        };
        static const int skl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000
        };
        static const int hsw_rates[] = {
                162000, 270000, 540000
        };
        static const int g4x_rates[] = {
                162000, 270000
        };
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        const struct ddi_vbt_port_info *info =
                &dev_priv->vbt.ddi_port_info[dig_port->base.port];
        const int *source_rates;
        int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

        /* This should only be done once */
        WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

        if (INTEL_GEN(dev_priv) >= 10) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
                if (IS_GEN(dev_priv, 10))
                        max_rate = cnl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
        } else if (IS_GEN9_LP(dev_priv)) {
                source_rates = bxt_rates;
                size = ARRAY_SIZE(bxt_rates);
        } else if (IS_GEN9_BC(dev_priv)) {
                source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
        } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
                   IS_BROADWELL(dev_priv)) {
                source_rates = hsw_rates;
                size = ARRAY_SIZE(hsw_rates);
        } else {
                source_rates = g4x_rates;
                size = ARRAY_SIZE(g4x_rates);
        }

        if (max_rate && vbt_max_rate)
                max_rate = min(max_rate, vbt_max_rate);
        else if (vbt_max_rate)
                max_rate = vbt_max_rate;

        if (max_rate)
                size = intel_dp_rate_limit_len(source_rates, size, max_rate);

        intel_dp->source_rates = source_rates;
        intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
                           const int *sink_rates, int sink_len,
                           int *common_rates)
{
        int i = 0, j = 0, k = 0;

        while (i < source_len && j < sink_len) {
                if (source_rates[i] == sink_rates[j]) {
                        if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
                                return k;
                        common_rates[k] = source_rates[i];
                        ++k;
                        ++i;
                        ++j;
                } else if (source_rates[i] < sink_rates[j]) {
                        ++i;
                } else {
                        ++j;
                }
        }
        return k;
}
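/*
 * Worked example (illustrative): intersecting source rates
 * { 162000, 216000, 270000, 540000 } with sink rates
 * { 162000, 270000, 540000, 810000 } walks both sorted arrays in a
 * single pass, fills common_rates with { 162000, 270000, 540000 },
 * and returns 3.
 */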

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
        int i;

        for (i = 0; i < len; i++)
                if (rate == rates[i])
                        return i;

        return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
        WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

        intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
                                                     intel_dp->num_source_rates,
                                                     intel_dp->sink_rates,
                                                     intel_dp->num_sink_rates,
                                                     intel_dp->common_rates);

        /* Paranoia, there should always be something in common. */
        if (WARN_ON(intel_dp->num_common_rates == 0)) {
                intel_dp->common_rates[0] = 162000;
                intel_dp->num_common_rates = 1;
        }
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
                                       u8 lane_count)
{
        /*
         * FIXME: we need to synchronize the current link parameters with
         * hardware readout. Currently fast link training doesn't work on
         * boot-up.
         */
        if (link_rate == 0 ||
            link_rate > intel_dp->max_link_rate)
                return false;

        if (lane_count == 0 ||
            lane_count > intel_dp_max_lane_count(intel_dp))
                return false;

        return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
                                                     int link_rate,
                                                     u8 lane_count)
{
        const struct drm_display_mode *fixed_mode =
                intel_dp->attached_connector->panel.fixed_mode;
        int mode_rate, max_rate;

        mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
        max_rate = intel_dp_max_data_rate(link_rate, lane_count);
        if (mode_rate > max_rate)
                return false;

        return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
                                            int link_rate, u8 lane_count)
{
        int index;

        index = intel_dp_rate_index(intel_dp->common_rates,
                                    intel_dp->num_common_rates,
                                    link_rate);
        if (index > 0) {
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp->common_rates[index - 1],
                                                              lane_count)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
                intel_dp->max_link_lane_count = lane_count;
        } else if (lane_count > 1) {
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp_max_common_rate(intel_dp),
                                                              lane_count >> 1)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
                intel_dp->max_link_lane_count = lane_count >> 1;
        } else {
                DRM_ERROR("Link Training Unsuccessful\n");
                return -1;
        }

        return 0;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
        u16 dsc_max_output_bpp = 0;
        u8 dsc_slice_count = 0;

        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;

        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);

        /*
         * Output bpp is stored in 6.4 format so right shift by 4 to get the
         * integer value since we support only integer values of bpp.
         */
        if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
            drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
                if (intel_dp_is_edp(intel_dp)) {
                        dsc_max_output_bpp =
                                drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
                        dsc_slice_count =
                                drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
                                                                true);
                } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
                        dsc_max_output_bpp =
                                intel_dp_dsc_get_output_bpp(max_link_clock,
                                                            max_lanes,
                                                            target_clock,
                                                            mode->hdisplay) >> 4;
                        dsc_slice_count =
                                intel_dp_dsc_get_slice_count(intel_dp,
                                                             target_clock,
                                                             mode->hdisplay);
                }
        }

        if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
            target_clock > max_dotclk)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
        int i;
        u32 v = 0;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= ((u32)src[i]) << ((3 - i) * 8);
        return v;
}
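/*
 * Worked example (illustrative): src == { 0x11, 0x22, 0x33 } packs
 * big-endian into the upper bytes, giving v == 0x11223300.
 */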

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
        int i;

        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
                                              bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        intel_wakeref_t wakeref;

        /*
         * See intel_power_sequencer_reset() for why we need
         * a power domain reference here.
         */
        wakeref = intel_display_power_get(dev_priv,
                                          intel_aux_power_domain(dp_to_dig_port(intel_dp)));

        mutex_lock(&dev_priv->pps_mutex);

        return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        mutex_unlock(&dev_priv->pps_mutex);
        intel_display_power_put(dev_priv,
                                intel_aux_power_domain(dp_to_dig_port(intel_dp)),
                                wakeref);
        return 0;
}

#define with_pps_lock(dp, wf) \
        for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
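/*
 * Typical usage (illustrative), mirroring edp_notify_handler() below:
 *
 *        with_pps_lock(intel_dp, wakeref) {
 *                ... PPS registers can be safely accessed here ...
 *        }
 */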

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        u32 DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power sequencer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->base.port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->base.port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev_priv))
                DP |= DP_PIPE_SEL_CHV(pipe);
        else
                DP |= DP_PIPE_SEL(pipe);

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable it temporarily if it's not already enabled.
         */
        if (!pll_enabled) {
                release_cl_override = IS_CHERRYVIEW(dev_priv) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);

                if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
                                     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
                        DRM_ERROR("Failed to force on pll for pipe %c!\n",
                                  pipe_name(pipe));
                        return;
                }
        }

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        if (!pll_enabled) {
                vlv_force_pll_off(dev_priv, pipe);

                if (release_cl_override)
                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
        }
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

        /*
         * We don't have a power sequencer currently.
         * Pick one that's not used by other ports.
         */
        for_each_intel_dp(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

                if (encoder->type == INTEL_OUTPUT_EDP) {
                        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
                                intel_dp->active_pipe != intel_dp->pps_pipe);

                        if (intel_dp->pps_pipe != INVALID_PIPE)
                                pipes &= ~(1 << intel_dp->pps_pipe);
                } else {
                        WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

                        if (intel_dp->active_pipe != INVALID_PIPE)
                                pipes &= ~(1 << intel_dp->active_pipe);
                }
        }

        if (pipes == 0)
                return INVALID_PIPE;

        return ffs(pipes) - 1;
}
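/*
 * Worked example (illustrative): if another eDP port already owns the
 * pipe B power sequencer, pipes ends up as (1 << PIPE_A), so
 * ffs(pipes) - 1 == PIPE_A is returned.
 */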

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!intel_dp_is_edp(intel_dp));

        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
                intel_dp->active_pipe != intel_dp->pps_pipe);

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        pipe = vlv_find_free_pps(dev_priv);

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipe == INVALID_PIPE))
                pipe = PIPE_A;

        vlv_steal_power_sequencer(dev_priv, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->base.port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int backlight_controller = dev_priv->vbt.backlight.controller;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!intel_dp_is_edp(intel_dp));

        if (!intel_dp->pps_reset)
                return backlight_controller;

        intel_dp->pps_reset = false;

        /*
         * Only the HW needs to be reprogrammed, the SW state is fixed and
         * has been set up during connector init.
         */
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

        return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
                     enum port port,
                     vlv_pipe_check pipe_check)
{
        enum pipe pipe;

        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;

                if (port_sel != PANEL_PORT_SELECT_VLV(port))
                        continue;

                if (!pipe_check(dev_priv, pipe))
                        continue;

                return pipe;
        }

        return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
                    !IS_GEN9_LP(dev_priv)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so they
         * should always be used.
         */

        for_each_intel_dp(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

                WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                if (IS_GEN9_LP(dev_priv))
                        intel_dp->pps_reset = true;
                else
                        intel_dp->pps_pipe = INVALID_PIPE;
        }
}

struct pps_registers {
        i915_reg_t pp_ctrl;
        i915_reg_t pp_stat;
        i915_reg_t pp_on;
        i915_reg_t pp_off;
        i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
                                    struct pps_registers *regs)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int pps_idx = 0;

        memset(regs, 0, sizeof(*regs));

        if (IS_GEN9_LP(dev_priv))
                pps_idx = bxt_power_sequencer_idx(intel_dp);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                pps_idx = vlv_power_sequencer_pipe(intel_dp);

        regs->pp_ctrl = PP_CONTROL(pps_idx);
        regs->pp_stat = PP_STATUS(pps_idx);
        regs->pp_on = PP_ON_DELAYS(pps_idx);
        regs->pp_off = PP_OFF_DELAYS(pps_idx);

        /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
        if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
                regs->pp_div = INVALID_MMIO_REG;
        else
                regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(intel_dp, &regs);

        return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(intel_dp, &regs);

        return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power to guarantee T12 timing.
 * This function is only applicable when panel PM state is not to be tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        intel_wakeref_t wakeref;

        if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        with_pps_lock(intel_dp, wakeref) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                        enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                        i915_reg_t pp_ctrl_reg, pp_div_reg;
                        u32 pp_div;

                        pp_ctrl_reg = PP_CONTROL(pipe);
                        pp_div_reg  = PP_DIVISOR(pipe);
                        pp_div = I915_READ(pp_div_reg);
                        pp_div &= PP_REFERENCE_DIVIDER_MASK;

                        /* 0x1F write to PP_DIV_REG sets max cycle delay */
                        I915_WRITE(pp_div_reg, pp_div | 0x1F);
                        I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
                        msleep(intel_dp->panel_power_cycle_delay);
                }
        }

        return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (!intel_dp_is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        u32 status;
        bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        done = wait_event_timeout(i915->gmbus_wait_queue, C,
                                  msecs_to_jiffies_timeout(10));

        /* just trace the final value */
        trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

        return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (index)
                return 0;

        /*
         * The clock divider is based off the hrawclk, and would like to run
         * at 2 MHz.  So, take the hrawclk value, divide by 2000, and use that.
         */
        return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
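/*
 * Worked example (illustrative): with e.g. a 24000 kHz hrawclk this
 * returns DIV_ROUND_CLOSEST(24000, 2000) == 12 for the first (and only)
 * divider index.
 */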

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        if (index)
                return 0;

        /*
         * The clock divider is based off the cdclk or PCH rawclk, and would
         * like to run at 2 MHz.  So, take the cdclk or PCH rawclk value,
         * divide by 2000, and use that.
         */
        if (dig_port->aux_ch == AUX_CH_A)
                return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
        else
                return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        }

        return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug into the existing code.
         */
        return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
                        to_i915(intel_dig_port->base.base.dev);
        u32 precharge, timeout;

        if (IS_GEN(dev_priv, 6))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev_priv))
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               DP_AUX_CH_CTL_INTERRUPT |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 unused)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        u32 ret;

        ret = DP_AUX_CH_CTL_SEND_BUSY |
              DP_AUX_CH_CTL_DONE |
              DP_AUX_CH_CTL_INTERRUPT |
              DP_AUX_CH_CTL_TIME_OUT_ERROR |
              DP_AUX_CH_CTL_TIME_OUT_MAX |
              DP_AUX_CH_CTL_RECEIVE_ERROR |
              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
              DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

        if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
                ret |= DP_AUX_CH_CTL_TBT_IO;

        return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
                  const u8 *send, int send_bytes,
                  u8 *recv, int recv_size,
                  u32 aux_send_ctl_flags)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *i915 =
                        to_i915(intel_dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
        bool is_tc_port = intel_phy_is_tc(i915, phy);
        i915_reg_t ch_ctl, ch_data[5];
        u32 aux_clock_divider;
        enum intel_display_power_domain aux_domain =
                intel_aux_power_domain(intel_dig_port);
        intel_wakeref_t aux_wakeref;
        intel_wakeref_t pps_wakeref;
        int i, ret, recv_bytes;
        int try, clock = 0;
        u32 status;
        bool vdd;

        ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        for (i = 0; i < ARRAY_SIZE(ch_data); i++)
                ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

        if (is_tc_port)
                intel_tc_port_lock(intel_dig_port);

        aux_wakeref = intel_display_power_get(i915, aux_domain);
        pps_wakeref = pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /*
         * dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&i915->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = intel_uncore_read_notrace(uncore, ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }
        /* just trace the final value */
        trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

        if (try == 3) {
                static u32 last_status = -1;
                const u32 status = intel_uncore_read(uncore, ch_ctl);

                if (status != last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
                        last_status = status;
                }

                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          send_bytes,
                                                          aux_clock_divider);

                send_ctl |= aux_send_ctl_flags;

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                intel_uncore_write(uncore,
                                                   ch_data[i >> 2],
                                                   intel_dp_pack_aux(send + i,
                                                                     send_bytes - i));

                        /* Send the command and wait for it to complete */
                        intel_uncore_write(uncore, ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp);

                        /* Clear done status and any errors */
                        intel_uncore_write(uncore,
                                           ch_ctl,
                                           status |
                                           DP_AUX_CH_CTL_DONE |
                                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                           DP_AUX_CH_CTL_RECEIVE_ERROR);

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         *   400us delay required for errors and timeouts
                         *   Timeout errors from the HW already meet this
                         *   requirement so skip to next iteration
                         */
                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected.
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

        /*
         * By BSpec: "Message sizes of 0 or >20 are not allowed."
         * We have no idea what happened, so we return -EBUSY so the
         * drm layer takes care of the necessary retries.
         */
        if (recv_bytes == 0 || recv_bytes > 20) {
                DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
                              recv_bytes);
                ret = -EBUSY;
                goto out;
        }

        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp, pps_wakeref);
        intel_display_power_put_async(i915, aux_domain, aux_wakeref);

        if (is_tc_port)
                intel_tc_port_unlock(intel_dig_port);

        return ret;
}

#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
                    const struct drm_dp_aux_msg *msg)
{
        txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
        txbuf[1] = (msg->address >> 8) & 0xff;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;
}
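/*
 * Worked example (illustrative): a 16 byte native read (request
 * DP_AUX_NATIVE_READ == 0x9) at DPCD address 0x00000 yields
 * txbuf == { 0x90, 0x00, 0x00, 0x0f }: the request in the high nibble,
 * the 20-bit address, and size - 1 == 15 in the last byte.
 */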
1385
1386 static ssize_t
1387 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1388 {
1389         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
1390         u8 txbuf[20], rxbuf[20];
1391         size_t txsize, rxsize;
1392         int ret;
1393
1394         intel_dp_aux_header(txbuf, msg);
1395
1396         switch (msg->request & ~DP_AUX_I2C_MOT) {
1397         case DP_AUX_NATIVE_WRITE:
1398         case DP_AUX_I2C_WRITE:
1399         case DP_AUX_I2C_WRITE_STATUS_UPDATE:
1400                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
1401                 rxsize = 2; /* 0 or 1 data bytes */
1402
1403                 if (WARN_ON(txsize > 20))
1404                         return -E2BIG;
1405
1406                 WARN_ON(!msg->buffer != !msg->size);
1407
1408                 if (msg->buffer)
1409                         memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
1410
1411                 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1412                                         rxbuf, rxsize, 0);
1413                 if (ret > 0) {
1414                         msg->reply = rxbuf[0] >> 4;
1415
1416                         if (ret > 1) {
1417                                 /* Number of bytes written in a short write. */
1418                                 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1419                         } else {
1420                                 /* Return payload size. */
1421                                 ret = msg->size;
1422                         }
1423                 }
1424                 break;
1425
1426         case DP_AUX_NATIVE_READ:
1427         case DP_AUX_I2C_READ:
1428                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1429                 rxsize = msg->size + 1;
1430
1431                 if (WARN_ON(rxsize > 20))
1432                         return -E2BIG;
1433
1434                 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1435                                         rxbuf, rxsize, 0);
1436                 if (ret > 0) {
1437                         msg->reply = rxbuf[0] >> 4;
1438                         /*
1439                          * Assume happy day, and copy the data. The caller is
1440                          * expected to check msg->reply before touching it.
1441                          *
1442                          * Return payload size.
1443                          */
1444                         ret--;
1445                         memcpy(msg->buffer, rxbuf + 1, ret);
1446                 }
1447                 break;
1448
1449         default:
1450                 ret = -EINVAL;
1451                 break;
1452         }
1453
1454         return ret;
1455 }
1456
1457
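     /*
      * Per-platform AUX register lookup: the g4x variants handle only
      * channels B-D, the ILK variants keep channel A on the CPU but map
      * B-D to PCH registers, and the SKL+ variants expose channels A-F
      * in a single bank. An unexpected channel warns via MISSING_CASE
      * and falls back to a safe default register.
      */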
1458 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1459 {
1460         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1461         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1462         enum aux_ch aux_ch = dig_port->aux_ch;
1463
1464         switch (aux_ch) {
1465         case AUX_CH_B:
1466         case AUX_CH_C:
1467         case AUX_CH_D:
1468                 return DP_AUX_CH_CTL(aux_ch);
1469         default:
1470                 MISSING_CASE(aux_ch);
1471                 return DP_AUX_CH_CTL(AUX_CH_B);
1472         }
1473 }
1474
1475 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1476 {
1477         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1478         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1479         enum aux_ch aux_ch = dig_port->aux_ch;
1480
1481         switch (aux_ch) {
1482         case AUX_CH_B:
1483         case AUX_CH_C:
1484         case AUX_CH_D:
1485                 return DP_AUX_CH_DATA(aux_ch, index);
1486         default:
1487                 MISSING_CASE(aux_ch);
1488                 return DP_AUX_CH_DATA(AUX_CH_B, index);
1489         }
1490 }
1491
1492 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1493 {
1494         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1495         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1496         enum aux_ch aux_ch = dig_port->aux_ch;
1497
1498         switch (aux_ch) {
1499         case AUX_CH_A:
1500                 return DP_AUX_CH_CTL(aux_ch);
1501         case AUX_CH_B:
1502         case AUX_CH_C:
1503         case AUX_CH_D:
1504                 return PCH_DP_AUX_CH_CTL(aux_ch);
1505         default:
1506                 MISSING_CASE(aux_ch);
1507                 return DP_AUX_CH_CTL(AUX_CH_A);
1508         }
1509 }
1510
1511 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1512 {
1513         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1514         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1515         enum aux_ch aux_ch = dig_port->aux_ch;
1516
1517         switch (aux_ch) {
1518         case AUX_CH_A:
1519                 return DP_AUX_CH_DATA(aux_ch, index);
1520         case AUX_CH_B:
1521         case AUX_CH_C:
1522         case AUX_CH_D:
1523                 return PCH_DP_AUX_CH_DATA(aux_ch, index);
1524         default:
1525                 MISSING_CASE(aux_ch);
1526                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1527         }
1528 }
1529
1530 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1531 {
1532         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1533         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1534         enum aux_ch aux_ch = dig_port->aux_ch;
1535
1536         switch (aux_ch) {
1537         case AUX_CH_A:
1538         case AUX_CH_B:
1539         case AUX_CH_C:
1540         case AUX_CH_D:
1541         case AUX_CH_E:
1542         case AUX_CH_F:
1543                 return DP_AUX_CH_CTL(aux_ch);
1544         default:
1545                 MISSING_CASE(aux_ch);
1546                 return DP_AUX_CH_CTL(AUX_CH_A);
1547         }
1548 }
1549
1550 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1551 {
1552         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1553         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1554         enum aux_ch aux_ch = dig_port->aux_ch;
1555
1556         switch (aux_ch) {
1557         case AUX_CH_A:
1558         case AUX_CH_B:
1559         case AUX_CH_C:
1560         case AUX_CH_D:
1561         case AUX_CH_E:
1562         case AUX_CH_F:
1563                 return DP_AUX_CH_DATA(aux_ch, index);
1564         default:
1565                 MISSING_CASE(aux_ch);
1566                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1567         }
1568 }
1569
1570 static void
1571 intel_dp_aux_fini(struct intel_dp *intel_dp)
1572 {
1573         kfree(intel_dp->aux.name);
1574 }
1575
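     /*
      * Wire up the per-generation AUX vfuncs (register lookup, clock
      * divider, send-ctl value) and register our transfer hook with the
      * DRM DP AUX core.
      */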
1576 static void
1577 intel_dp_aux_init(struct intel_dp *intel_dp)
1578 {
1579         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1580         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1581         struct intel_encoder *encoder = &dig_port->base;
1582
1583         if (INTEL_GEN(dev_priv) >= 9) {
1584                 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1585                 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1586         } else if (HAS_PCH_SPLIT(dev_priv)) {
1587                 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1588                 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1589         } else {
1590                 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1591                 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1592         }
1593
1594         if (INTEL_GEN(dev_priv) >= 9)
1595                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1596         else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1597                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1598         else if (HAS_PCH_SPLIT(dev_priv))
1599                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1600         else
1601                 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
1602
1603         if (INTEL_GEN(dev_priv) >= 9)
1604                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1605         else
1606                 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
1607
1608         drm_dp_aux_init(&intel_dp->aux);
1609
1610         /* Failure to allocate our preferred name is not critical */
1611         intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
1612                                        port_name(encoder->port));
1613         intel_dp->aux.transfer = intel_dp_aux_transfer;
1614 }
1615
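     /*
      * source_rates[] is sorted ascending, so the last entry is the
      * maximum. HBR2 corresponds to a 540000 kHz link clock
      * (5.4 Gbps/lane), HBR3 to 810000 kHz (8.1 Gbps/lane).
      */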
1616 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1617 {
1618         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1619
1620         return max_rate >= 540000;
1621 }
1622
1623 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1624 {
1625         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1626
1627         return max_rate >= 810000;
1628 }
1629
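     /*
      * Pre-DDI platforms use fixed DPLL divider values per DP link rate;
      * look the dividers up in the platform table and mark the clock as
      * already computed.
      */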
1630 static void
1631 intel_dp_set_clock(struct intel_encoder *encoder,
1632                    struct intel_crtc_state *pipe_config)
1633 {
1634         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1635         const struct dp_link_dpll *divisor = NULL;
1636         int i, count = 0;
1637
1638         if (IS_G4X(dev_priv)) {
1639                 divisor = g4x_dpll;
1640                 count = ARRAY_SIZE(g4x_dpll);
1641         } else if (HAS_PCH_SPLIT(dev_priv)) {
1642                 divisor = pch_dpll;
1643                 count = ARRAY_SIZE(pch_dpll);
1644         } else if (IS_CHERRYVIEW(dev_priv)) {
1645                 divisor = chv_dpll;
1646                 count = ARRAY_SIZE(chv_dpll);
1647         } else if (IS_VALLEYVIEW(dev_priv)) {
1648                 divisor = vlv_dpll;
1649                 count = ARRAY_SIZE(vlv_dpll);
1650         }
1651
1652         if (divisor && count) {
1653                 for (i = 0; i < count; i++) {
1654                         if (pipe_config->port_clock == divisor[i].clock) {
1655                                 pipe_config->dpll = divisor[i].dpll;
1656                                 pipe_config->clock_set = true;
1657                                 break;
1658                         }
1659                 }
1660         }
1661 }
1662
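     /* Format an int array as "a, b, c", stopping cleanly if the buffer
      * would overflow. */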
1663 static void snprintf_int_array(char *str, size_t len,
1664                                const int *array, int nelem)
1665 {
1666         int i;
1667
1668         str[0] = '\0';
1669
1670         for (i = 0; i < nelem; i++) {
1671                 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1672                 if (r >= len)
1673                         return;
1674                 str += r;
1675                 len -= r;
1676         }
1677 }
1678
1679 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1680 {
1681         char str[128]; /* FIXME: too big for stack? */
1682
1683         if ((drm_debug & DRM_UT_KMS) == 0)
1684                 return;
1685
1686         snprintf_int_array(str, sizeof(str),
1687                            intel_dp->source_rates, intel_dp->num_source_rates);
1688         DRM_DEBUG_KMS("source rates: %s\n", str);
1689
1690         snprintf_int_array(str, sizeof(str),
1691                            intel_dp->sink_rates, intel_dp->num_sink_rates);
1692         DRM_DEBUG_KMS("sink rates: %s\n", str);
1693
1694         snprintf_int_array(str, sizeof(str),
1695                            intel_dp->common_rates, intel_dp->num_common_rates);
1696         DRM_DEBUG_KMS("common rates: %s\n", str);
1697 }
1698
1699 int
1700 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1701 {
1702         int len;
1703
1704         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1705         if (WARN_ON(len <= 0))
1706                 return 162000;
1707
1708         return intel_dp->common_rates[len - 1];
1709 }
1710
1711 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1712 {
1713         int i = intel_dp_rate_index(intel_dp->sink_rates,
1714                                     intel_dp->num_sink_rates, rate);
1715
1716         if (WARN_ON(i < 0))
1717                 i = 0;
1718
1719         return i;
1720 }
1721
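     /*
      * A sink is programmed with either a legacy link BW code or an
      * eDP 1.4 rate select index; exactly one of *link_bw and
      * *rate_select is meaningful, with the other set to zero.
      */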
1722 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1723                            u8 *link_bw, u8 *rate_select)
1724 {
1725         /* eDP 1.4 rate select method. */
1726         if (intel_dp->use_rate_select) {
1727                 *link_bw = 0;
1728                 *rate_select =
1729                         intel_dp_rate_select(intel_dp, port_clock);
1730         } else {
1731                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1732                 *rate_select = 0;
1733         }
1734 }
1735
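     /*
      * Source-side FEC (required for DSC on non-eDP links) exists on
      * gen11+ and source-side DSC on gen10+, but neither is supported
      * on transcoder A.
      */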
1736 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1737                                          const struct intel_crtc_state *pipe_config)
1738 {
1739         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1740
1741         return INTEL_GEN(dev_priv) >= 11 &&
1742                 pipe_config->cpu_transcoder != TRANSCODER_A;
1743 }
1744
1745 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1746                                   const struct intel_crtc_state *pipe_config)
1747 {
1748         return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1749                 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1750 }
1751
1752 static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
1753                                          const struct intel_crtc_state *pipe_config)
1754 {
1755         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1756
1757         return INTEL_GEN(dev_priv) >= 10 &&
1758                 pipe_config->cpu_transcoder != TRANSCODER_A;
1759 }
1760
1761 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1762                                   const struct intel_crtc_state *pipe_config)
1763 {
1764         if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
1765                 return false;
1766
1767         return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
1768                 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1769 }
1770
1771 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1772                                 struct intel_crtc_state *pipe_config)
1773 {
1774         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1775         struct intel_connector *intel_connector = intel_dp->attached_connector;
1776         int bpp, bpc;
1777
1778         bpp = pipe_config->pipe_bpp;
1779         bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1780
1781         if (bpc > 0)
1782                 bpp = min(bpp, 3*bpc);
1783
1784         if (intel_dp_is_edp(intel_dp)) {
1785                 /* Get bpp from VBT only for panels that don't have bpp in EDID */
1786                 if (intel_connector->base.display_info.bpc == 0 &&
1787                     dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1788                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1789                                       dev_priv->vbt.edp.bpp);
1790                         bpp = dev_priv->vbt.edp.bpp;
1791                 }
1792         }
1793
1794         return bpp;
1795 }
1796
1797 /* Adjust link config limits based on compliance test requests. */
1798 void
1799 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1800                                   struct intel_crtc_state *pipe_config,
1801                                   struct link_config_limits *limits)
1802 {
1803         /* For DP Compliance we override the computed bpp for the pipe */
1804         if (intel_dp->compliance.test_data.bpc != 0) {
1805                 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1806
1807                 limits->min_bpp = limits->max_bpp = bpp;
1808                 pipe_config->dither_force_disable = bpp == 6 * 3;
1809
1810                 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
1811         }
1812
1813         /* Use values requested by Compliance Test Request */
1814         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1815                 int index;
1816
1817                 /* Validate the compliance test data since max values
1818                  * might have changed due to link train fallback.
1819                  */
1820                 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1821                                                intel_dp->compliance.test_lane_count)) {
1822                         index = intel_dp_rate_index(intel_dp->common_rates,
1823                                                     intel_dp->num_common_rates,
1824                                                     intel_dp->compliance.test_link_rate);
1825                         if (index >= 0)
1826                                 limits->min_clock = limits->max_clock = index;
1827                         limits->min_lane_count = limits->max_lane_count =
1828                                 intel_dp->compliance.test_lane_count;
1829                 }
1830         }
1831 }
1832
1833 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
1834 {
1835         /*
1836          * The bpp value was computed assuming RGB output. For YCbCr 4:2:0
1837          * output the effective number of bits per pixel is half that of
1838          * RGB.
1839          */
1840         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1841                 bpp /= 2;
1842
1843         return bpp;
1844 }
1845
1846 /* Optimize link config in order: max bpp, min clock, min lanes */
1847 static int
1848 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1849                                   struct intel_crtc_state *pipe_config,
1850                                   const struct link_config_limits *limits)
1851 {
1852         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1853         int bpp, clock, lane_count;
1854         int mode_rate, link_clock, link_avail;
1855
1856         for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1857                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1858                                                    bpp);
1859
1860                 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1861                         for (lane_count = limits->min_lane_count;
1862                              lane_count <= limits->max_lane_count;
1863                              lane_count <<= 1) {
1864                                 link_clock = intel_dp->common_rates[clock];
1865                                 link_avail = intel_dp_max_data_rate(link_clock,
1866                                                                     lane_count);
1867
1868                                 if (mode_rate <= link_avail) {
1869                                         pipe_config->lane_count = lane_count;
1870                                         pipe_config->pipe_bpp = bpp;
1871                                         pipe_config->port_clock = link_clock;
1872
1873                                         return 0;
1874                                 }
1875                         }
1876                 }
1877         }
1878
1879         return -EINVAL;
1880 }
1881
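     /*
      * Pick a sink-supported input bpc that does not exceed dsc_max_bpc
      * and return it as a pipe bpp (3 * bpc), or 0 when nothing fits.
      */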
1882 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1883 {
1884         int i, num_bpc;
1885         u8 dsc_bpc[3] = {0};
1886
1887         num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1888                                                        dsc_bpc);
1889         for (i = 0; i < num_bpc; i++) {
1890                 if (dsc_max_bpc >= dsc_bpc[i])
1891                         return dsc_bpc[i] * 3;
1892         }
1893
1894         return 0;
1895 }
1896
1897 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
1898                                        struct intel_crtc_state *pipe_config,
1899                                        struct drm_connector_state *conn_state,
1900                                        struct link_config_limits *limits)
1901 {
1902         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1903         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1904         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1905         u8 dsc_max_bpc;
1906         int pipe_bpp;
1907         int ret;
1908
1909         pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
1910                 intel_dp_supports_fec(intel_dp, pipe_config);
1911
1912         if (!intel_dp_supports_dsc(intel_dp, pipe_config))
1913                 return -EINVAL;
1914
1915         dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
1916                             conn_state->max_requested_bpc);
1917
1918         pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
1919         if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
1920                 DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
1921                 return -EINVAL;
1922         }
1923
1924         /*
1925          * For now enable DSC for max bpp, max link rate, max lane count.
1926          * Optimize this later for the minimum possible link rate/lane count
1927          * with DSC enabled for the requested mode.
1928          */
1929         pipe_config->pipe_bpp = pipe_bpp;
1930         pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
1931         pipe_config->lane_count = limits->max_lane_count;
1932
1933         if (intel_dp_is_edp(intel_dp)) {
1934                 pipe_config->dsc_params.compressed_bpp =
1935                         min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
1936                               pipe_config->pipe_bpp);
1937                 pipe_config->dsc_params.slice_count =
1938                         drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
1939                                                         true);
1940         } else {
1941                 u16 dsc_max_output_bpp;
1942                 u8 dsc_dp_slice_count;
1943
1944                 dsc_max_output_bpp =
1945                         intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
1946                                                     pipe_config->lane_count,
1947                                                     adjusted_mode->crtc_clock,
1948                                                     adjusted_mode->crtc_hdisplay);
1949                 dsc_dp_slice_count =
1950                         intel_dp_dsc_get_slice_count(intel_dp,
1951                                                      adjusted_mode->crtc_clock,
1952                                                      adjusted_mode->crtc_hdisplay);
1953                 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
1954                         DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
1955                         return -EINVAL;
1956                 }
1957                 pipe_config->dsc_params.compressed_bpp = min_t(u16,
1958                                                                dsc_max_output_bpp >> 4,
1959                                                                pipe_config->pipe_bpp);
1960                 pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
1961         }
1962         /*
1963          * The VDSC engine operates at 1 pixel per clock, so if the peak
1964          * pixel rate is greater than the maximum cdclk and the stream can
1965          * be split (slice count greater than 1), we use 2 VDSC instances.
1966          */
1967         if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
1968                 if (pipe_config->dsc_params.slice_count > 1) {
1969                         pipe_config->dsc_params.dsc_split = true;
1970                 } else {
1971                         DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
1972                         return -EINVAL;
1973                 }
1974         }
1975
1976         ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
1977         if (ret < 0) {
1978                 DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
1979                               "Compressed BPP = %d\n",
1980                               pipe_config->pipe_bpp,
1981                               pipe_config->dsc_params.compressed_bpp);
1982                 return ret;
1983         }
1984
1985         pipe_config->dsc_params.compression_enable = true;
1986         DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
1987                       "Compressed Bpp = %d Slice Count = %d\n",
1988                       pipe_config->pipe_bpp,
1989                       pipe_config->dsc_params.compressed_bpp,
1990                       pipe_config->dsc_params.slice_count);
1991
1992         return 0;
1993 }
1994
1995 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
1996 {
1997         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
1998                 return 6 * 3;
1999         else
2000                 return 8 * 3;
2001 }
2002
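     /*
      * Top level link config computation: derive the clock/lane/bpp
      * limits (pinned to the panel's maximums for eDP), try an
      * uncompressed config first, and fall back to DSC if the mode
      * doesn't fit the available bandwidth.
      */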
2003 static int
2004 intel_dp_compute_link_config(struct intel_encoder *encoder,
2005                              struct intel_crtc_state *pipe_config,
2006                              struct drm_connector_state *conn_state)
2007 {
2008         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2009         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2010         struct link_config_limits limits;
2011         int common_len;
2012         int ret;
2013
2014         common_len = intel_dp_common_len_rate_limit(intel_dp,
2015                                                     intel_dp->max_link_rate);
2016
2017         /* No common link rates between source and sink */
2018         WARN_ON(common_len <= 0);
2019
2020         limits.min_clock = 0;
2021         limits.max_clock = common_len - 1;
2022
2023         limits.min_lane_count = 1;
2024         limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2025
2026         limits.min_bpp = intel_dp_min_bpp(pipe_config);
2027         limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2028
2029         if (intel_dp_is_edp(intel_dp)) {
2030                 /*
2031                  * Use the maximum clock and number of lanes the eDP panel
2032                  * advertises being capable of. The panels are generally
2033                  * designed to support only a single clock and lane
2034                  * configuration, and typically these values correspond to the
2035                  * native resolution of the panel.
2036                  */
2037                 limits.min_lane_count = limits.max_lane_count;
2038                 limits.min_clock = limits.max_clock;
2039         }
2040
2041         intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2042
2043         DRM_DEBUG_KMS("DP link computation with max lane count %i "
2044                       "max rate %d max bpp %d pixel clock %iKHz\n",
2045                       limits.max_lane_count,
2046                       intel_dp->common_rates[limits.max_clock],
2047                       limits.max_bpp, adjusted_mode->crtc_clock);
2048
2049         /*
2050          * Optimize for slow and wide. This is the place to add alternative
2051          * optimization policy.
2052          */
2053         ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
2054
2055         /* enable compression if the mode doesn't fit available BW */
2056         DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2057         if (ret || intel_dp->force_dsc_en) {
2058                 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2059                                                   conn_state, &limits);
2060                 if (ret < 0)
2061                         return ret;
2062         }
2063
2064         if (pipe_config->dsc_params.compression_enable) {
2065                 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2066                               pipe_config->lane_count, pipe_config->port_clock,
2067                               pipe_config->pipe_bpp,
2068                               pipe_config->dsc_params.compressed_bpp);
2069
2070                 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2071                               intel_dp_link_required(adjusted_mode->crtc_clock,
2072                                                      pipe_config->dsc_params.compressed_bpp),
2073                               intel_dp_max_data_rate(pipe_config->port_clock,
2074                                                      pipe_config->lane_count));
2075         } else {
2076                 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2077                               pipe_config->lane_count, pipe_config->port_clock,
2078                               pipe_config->pipe_bpp);
2079
2080                 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2081                               intel_dp_link_required(adjusted_mode->crtc_clock,
2082                                                      pipe_config->pipe_bpp),
2083                               intel_dp_max_data_rate(pipe_config->port_clock,
2084                                                      pipe_config->lane_count));
2085         }
2086         return 0;
2087 }
2088
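     /*
      * Switch to YCbCr 4:2:0 output only for 420-only modes, and only
      * when the sink advertises colorimetry support and the connector
      * allows it; a pipe scaler is required for the conversion.
      */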
2089 static int
2090 intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
2091                          struct drm_connector *connector,
2092                          struct intel_crtc_state *crtc_state)
2093 {
2094         const struct drm_display_info *info = &connector->display_info;
2095         const struct drm_display_mode *adjusted_mode =
2096                 &crtc_state->base.adjusted_mode;
2097         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2098         int ret;
2099
2100         if (!drm_mode_is_420_only(info, adjusted_mode) ||
2101             !intel_dp_get_colorimetry_status(intel_dp) ||
2102             !connector->ycbcr_420_allowed)
2103                 return 0;
2104
2105         crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
2106
2107         /* YCBCR 420 output conversion needs a scaler */
2108         ret = skl_update_scaler_crtc(crtc_state);
2109         if (ret) {
2110                 DRM_DEBUG_KMS("Scaler allocation for output failed\n");
2111                 return ret;
2112         }
2113
2114         intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
2115
2116         return 0;
2117 }
2118
2119 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2120                                   const struct drm_connector_state *conn_state)
2121 {
2122         const struct intel_digital_connector_state *intel_conn_state =
2123                 to_intel_digital_connector_state(conn_state);
2124         const struct drm_display_mode *adjusted_mode =
2125                 &crtc_state->base.adjusted_mode;
2126
2127         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2128                 /*
2129                  * See:
2130                  * CEA-861-E - 5.1 Default Encoding Parameters
2131                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2132                  */
2133                 return crtc_state->pipe_bpp != 18 &&
2134                         drm_default_rgb_quant_range(adjusted_mode) ==
2135                         HDMI_QUANTIZATION_RANGE_LIMITED;
2136         } else {
2137                 return intel_conn_state->broadcast_rgb ==
2138                         INTEL_BROADCAST_RGB_LIMITED;
2139         }
2140 }
2141
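     /*
      * Main .compute_config() hook for DP encoders: pick the output
      * format, audio and panel fitting state, compute the link
      * configuration, and derive the M/N values (including a second set
      * for seamless DRRS when a downclocked mode exists).
      */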
2142 int
2143 intel_dp_compute_config(struct intel_encoder *encoder,
2144                         struct intel_crtc_state *pipe_config,
2145                         struct drm_connector_state *conn_state)
2146 {
2147         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2148         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2149         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2150         struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2151         enum port port = encoder->port;
2152         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2153         struct intel_connector *intel_connector = intel_dp->attached_connector;
2154         struct intel_digital_connector_state *intel_conn_state =
2155                 to_intel_digital_connector_state(conn_state);
2156         bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
2157                                            DP_DPCD_QUIRK_CONSTANT_N);
2158         int ret = 0, output_bpp;
2159
2160         if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2161                 pipe_config->has_pch_encoder = true;
2162
2163         pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2164         if (lspcon->active)
2165                 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
2166         else
2167                 ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
2168                                                pipe_config);
2169
2170         if (ret)
2171                 return ret;
2172
2173         pipe_config->has_drrs = false;
2174         if (IS_G4X(dev_priv) || port == PORT_A)
2175                 pipe_config->has_audio = false;
2176         else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2177                 pipe_config->has_audio = intel_dp->has_audio;
2178         else
2179                 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2180
2181         if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2182                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2183                                        adjusted_mode);
2184
2185                 if (INTEL_GEN(dev_priv) >= 9) {
2186                         ret = skl_update_scaler_crtc(pipe_config);
2187                         if (ret)
2188                                 return ret;
2189                 }
2190
2191                 if (HAS_GMCH(dev_priv))
2192                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
2193                                                  conn_state->scaling_mode);
2194                 else
2195                         intel_pch_panel_fitting(intel_crtc, pipe_config,
2196                                                 conn_state->scaling_mode);
2197         }
2198
2199         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2200                 return -EINVAL;
2201
2202         if (HAS_GMCH(dev_priv) &&
2203             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2204                 return -EINVAL;
2205
2206         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2207                 return -EINVAL;
2208
2209         ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2210         if (ret < 0)
2211                 return ret;
2212
2213         pipe_config->limited_color_range =
2214                 intel_dp_limited_color_range(pipe_config, conn_state);
2215
2216         if (pipe_config->dsc_params.compression_enable)
2217                 output_bpp = pipe_config->dsc_params.compressed_bpp;
2218         else
2219                 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
2220
2221         intel_link_compute_m_n(output_bpp,
2222                                pipe_config->lane_count,
2223                                adjusted_mode->crtc_clock,
2224                                pipe_config->port_clock,
2225                                &pipe_config->dp_m_n,
2226                                constant_n);
2227
2228         if (intel_connector->panel.downclock_mode != NULL &&
2229             dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2230                 pipe_config->has_drrs = true;
2231                 intel_link_compute_m_n(output_bpp,
2232                                        pipe_config->lane_count,
2233                                        intel_connector->panel.downclock_mode->clock,
2234                                        pipe_config->port_clock,
2235                                        &pipe_config->dp_m2_n2,
2236                                        constant_n);
2237         }
2238
2239         if (!HAS_DDI(dev_priv))
2240                 intel_dp_set_clock(encoder, pipe_config);
2241
2242         intel_psr_compute_config(intel_dp, pipe_config);
2243
2244         return 0;
2245 }
2246
2247 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2248                               int link_rate, u8 lane_count,
2249                               bool link_mst)
2250 {
2251         intel_dp->link_trained = false;
2252         intel_dp->link_rate = link_rate;
2253         intel_dp->lane_count = lane_count;
2254         intel_dp->link_mst = link_mst;
2255 }
2256
2257 static void intel_dp_prepare(struct intel_encoder *encoder,
2258                              const struct intel_crtc_state *pipe_config)
2259 {
2260         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2261         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2262         enum port port = encoder->port;
2263         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2264         const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2265
2266         intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2267                                  pipe_config->lane_count,
2268                                  intel_crtc_has_type(pipe_config,
2269                                                      INTEL_OUTPUT_DP_MST));
2270
2271         /*
2272          * There are four kinds of DP registers:
2273          *
2274          *      IBX PCH
2275          *      SNB CPU
2276          *      IVB CPU
2277          *      CPT PCH
2278          *
2279          * IBX PCH and CPU are the same for almost everything,
2280          * except that the CPU DP PLL is configured in this
2281          * register
2282          *
2283          * CPT PCH is quite different, having many bits moved
2284          * to the TRANS_DP_CTL register instead. That
2285          * configuration happens (oddly) in ironlake_pch_enable
2286          */
2287
2288         /* Preserve the BIOS-computed detected bit. This is
2289          * supposed to be read-only.
2290          */
2291         intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2292
2293         /* Handle DP bits in common between all three register formats */
2294         intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2295         intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2296
2297         /* Split out the IBX/CPU vs CPT settings */
2298
2299         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2300                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2301                         intel_dp->DP |= DP_SYNC_HS_HIGH;
2302                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2303                         intel_dp->DP |= DP_SYNC_VS_HIGH;
2304                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2305
2306                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2307                         intel_dp->DP |= DP_ENHANCED_FRAMING;
2308
2309                 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2310         } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2311                 u32 trans_dp;
2312
2313                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2314
2315                 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2316                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2317                         trans_dp |= TRANS_DP_ENH_FRAMING;
2318                 else
2319                         trans_dp &= ~TRANS_DP_ENH_FRAMING;
2320                 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2321         } else {
2322                 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2323                         intel_dp->DP |= DP_COLOR_RANGE_16_235;
2324
2325                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2326                         intel_dp->DP |= DP_SYNC_HS_HIGH;
2327                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2328                         intel_dp->DP |= DP_SYNC_VS_HIGH;
2329                 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2330
2331                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2332                         intel_dp->DP |= DP_ENHANCED_FRAMING;
2333
2334                 if (IS_CHERRYVIEW(dev_priv))
2335                         intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2336                 else
2337                         intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2338         }
2339 }
2340
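     /*
      * Panel power sequencer state masks: "on" and "off" check PP_ON
      * plus the sequencer progress bits, while a full power cycle also
      * requires the power-cycle delay (PP_CYCLE_DELAY_ACTIVE) to have
      * cleared.
      */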
2341 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
2342 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
2343
2344 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
2345 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
2346
2347 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2348 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
2349
2350 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2351
2352 static void wait_panel_status(struct intel_dp *intel_dp,
2353                               u32 mask,
2354                               u32 value)
2355 {
2356         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2357         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2358
2359         lockdep_assert_held(&dev_priv->pps_mutex);
2360
2361         intel_pps_verify_state(intel_dp);
2362
2363         pp_stat_reg = _pp_stat_reg(intel_dp);
2364         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2365
2366         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2367                         mask, value,
2368                         I915_READ(pp_stat_reg),
2369                         I915_READ(pp_ctrl_reg));
2370
2371         if (intel_wait_for_register(&dev_priv->uncore,
2372                                     pp_stat_reg, mask, value,
2373                                     5000))
2374                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2375                                 I915_READ(pp_stat_reg),
2376                                 I915_READ(pp_ctrl_reg));
2377
2378         DRM_DEBUG_KMS("Wait complete\n");
2379 }
2380
2381 static void wait_panel_on(struct intel_dp *intel_dp)
2382 {
2383         DRM_DEBUG_KMS("Wait for panel power on\n");
2384         wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2385 }
2386
2387 static void wait_panel_off(struct intel_dp *intel_dp)
2388 {
2389         DRM_DEBUG_KMS("Wait for panel power off time\n");
2390         wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2391 }
2392
2393 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2394 {
2395         ktime_t panel_power_on_time;
2396         s64 panel_power_off_duration;
2397
2398         DRM_DEBUG_KMS("Wait for panel power cycle\n");
2399
2400         /* Take the difference of the current time and the panel power off
2401          * time, and then make the panel wait for t11_t12 if needed. */
2402         panel_power_on_time = ktime_get_boottime();
2403         panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2404
2405         /* When we were the last to disable the VDD override bit, we have
2406          * to do the wait manually. */
2407         if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2408                 wait_remaining_ms_from_jiffies(jiffies,
2409                                        intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2410
2411         wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2412 }
2413
2414 static void wait_backlight_on(struct intel_dp *intel_dp)
2415 {
2416         wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2417                                        intel_dp->backlight_on_delay);
2418 }
2419
2420 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2421 {
2422         wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2423                                        intel_dp->backlight_off_delay);
2424 }
2425
2426 /* Read the current pp_control value, unlocking the register if it
2427  * is locked.
2428  */
2429
2430 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2431 {
2432         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2433         u32 control;
2434
2435         lockdep_assert_held(&dev_priv->pps_mutex);
2436
2437         control = I915_READ(_pp_ctrl_reg(intel_dp));
2438         if (WARN_ON(!HAS_DDI(dev_priv) &&
2439                     (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2440                 control &= ~PANEL_UNLOCK_MASK;
2441                 control |= PANEL_UNLOCK_REGS;
2442         }
2443         return control;
2444 }
2445
2446 /*
2447  * Must be paired with edp_panel_vdd_off().
2448  * Must hold pps_mutex around the whole on/off sequence.
2449  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2450  */
2451 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2452 {
2453         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2454         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2455         u32 pp;
2456         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2457         bool need_to_disable = !intel_dp->want_panel_vdd;
2458
2459         lockdep_assert_held(&dev_priv->pps_mutex);
2460
2461         if (!intel_dp_is_edp(intel_dp))
2462                 return false;
2463
2464         cancel_delayed_work(&intel_dp->panel_vdd_work);
2465         intel_dp->want_panel_vdd = true;
2466
2467         if (edp_have_panel_vdd(intel_dp))
2468                 return need_to_disable;
2469
2470         intel_display_power_get(dev_priv,
2471                                 intel_aux_power_domain(intel_dig_port));
2472
2473         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2474                       port_name(intel_dig_port->base.port));
2475
2476         if (!edp_have_panel_power(intel_dp))
2477                 wait_panel_power_cycle(intel_dp);
2478
2479         pp = ironlake_get_pp_control(intel_dp);
2480         pp |= EDP_FORCE_VDD;
2481
2482         pp_stat_reg = _pp_stat_reg(intel_dp);
2483         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2484
2485         I915_WRITE(pp_ctrl_reg, pp);
2486         POSTING_READ(pp_ctrl_reg);
2487         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2488                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2489         /*
2490          * If the panel wasn't on, delay before accessing the aux channel.
2491          */
2492         if (!edp_have_panel_power(intel_dp)) {
2493                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2494                               port_name(intel_dig_port->base.port));
2495                 msleep(intel_dp->panel_power_up_delay);
2496         }
2497
2498         return need_to_disable;
2499 }
2500
2501 /*
2502  * Must be paired with intel_edp_panel_vdd_off() or
2503  * intel_edp_panel_off().
2504  * Nested calls to these functions are not allowed since
2505  * we drop the lock. Caller must use some higher level
2506  * locking to prevent nested calls from other threads.
2507  */
2508 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2509 {
2510         intel_wakeref_t wakeref;
2511         bool vdd;
2512
2513         if (!intel_dp_is_edp(intel_dp))
2514                 return;
2515
2516         vdd = false;
2517         with_pps_lock(intel_dp, wakeref)
2518                 vdd = edp_panel_vdd_on(intel_dp);
2519         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2520              port_name(dp_to_dig_port(intel_dp)->base.port));
2521 }
2522
2523 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2524 {
2525         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2526         struct intel_digital_port *intel_dig_port =
2527                 dp_to_dig_port(intel_dp);
2528         u32 pp;
2529         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2530
2531         lockdep_assert_held(&dev_priv->pps_mutex);
2532
2533         WARN_ON(intel_dp->want_panel_vdd);
2534
2535         if (!edp_have_panel_vdd(intel_dp))
2536                 return;
2537
2538         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2539                       port_name(intel_dig_port->base.port));
2540
2541         pp = ironlake_get_pp_control(intel_dp);
2542         pp &= ~EDP_FORCE_VDD;
2543
2544         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2545         pp_stat_reg = _pp_stat_reg(intel_dp);
2546
2547         I915_WRITE(pp_ctrl_reg, pp);
2548         POSTING_READ(pp_ctrl_reg);
2549
2550         /* Make sure sequencer is idle before allowing subsequent activity */
2551         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2552                       I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2553
2554         if ((pp & PANEL_POWER_ON) == 0)
2555                 intel_dp->panel_power_off_time = ktime_get_boottime();
2556
2557         intel_display_power_put_unchecked(dev_priv,
2558                                           intel_aux_power_domain(intel_dig_port));
2559 }
2560
2561 static void edp_panel_vdd_work(struct work_struct *__work)
2562 {
2563         struct intel_dp *intel_dp =
2564                 container_of(to_delayed_work(__work),
2565                              struct intel_dp, panel_vdd_work);
2566         intel_wakeref_t wakeref;
2567
2568         with_pps_lock(intel_dp, wakeref) {
2569                 if (!intel_dp->want_panel_vdd)
2570                         edp_panel_vdd_off_sync(intel_dp);
2571         }
2572 }
2573
2574 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2575 {
2576         unsigned long delay;
2577
2578         /*
2579          * Queue the timer to fire a long time from now (relative to the power
2580          * down delay) to keep the panel power up across a sequence of
2581          * operations.
2582          */
2583         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2584         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2585 }
2586
2587 /*
2588  * Must be paired with edp_panel_vdd_on().
2589  * Must hold pps_mutex around the whole on/off sequence.
2590  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2591  */
2592 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2593 {
2594         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2595
2596         lockdep_assert_held(&dev_priv->pps_mutex);
2597
2598         if (!intel_dp_is_edp(intel_dp))
2599                 return;
2600
2601         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2602              port_name(dp_to_dig_port(intel_dp)->base.port));
2603
2604         intel_dp->want_panel_vdd = false;
2605
2606         if (sync)
2607                 edp_panel_vdd_off_sync(intel_dp);
2608         else
2609                 edp_panel_vdd_schedule_off(intel_dp);
2610 }
2611
2612 static void edp_panel_on(struct intel_dp *intel_dp)
2613 {
2614         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2615         u32 pp;
2616         i915_reg_t pp_ctrl_reg;
2617
2618         lockdep_assert_held(&dev_priv->pps_mutex);
2619
2620         if (!intel_dp_is_edp(intel_dp))
2621                 return;
2622
2623         DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2624                       port_name(dp_to_dig_port(intel_dp)->base.port));
2625
2626         if (WARN(edp_have_panel_power(intel_dp),
2627                  "eDP port %c panel power already on\n",
2628                  port_name(dp_to_dig_port(intel_dp)->base.port)))
2629                 return;
2630
2631         wait_panel_power_cycle(intel_dp);
2632
2633         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2634         pp = ironlake_get_pp_control(intel_dp);
2635         if (IS_GEN(dev_priv, 5)) {
2636                 /* ILK workaround: disable reset around power sequence */
2637                 pp &= ~PANEL_POWER_RESET;
2638                 I915_WRITE(pp_ctrl_reg, pp);
2639                 POSTING_READ(pp_ctrl_reg);
2640         }
2641
2642         pp |= PANEL_POWER_ON;
2643         if (!IS_GEN(dev_priv, 5))
2644                 pp |= PANEL_POWER_RESET;
2645
2646         I915_WRITE(pp_ctrl_reg, pp);
2647         POSTING_READ(pp_ctrl_reg);
2648
2649         wait_panel_on(intel_dp);
2650         intel_dp->last_power_on = jiffies;
2651
2652         if (IS_GEN(dev_priv, 5)) {
2653                 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2654                 I915_WRITE(pp_ctrl_reg, pp);
2655                 POSTING_READ(pp_ctrl_reg);
2656         }
2657 }
2658
2659 void intel_edp_panel_on(struct intel_dp *intel_dp)
2660 {
2661         intel_wakeref_t wakeref;
2662
2663         if (!intel_dp_is_edp(intel_dp))
2664                 return;
2665
2666         with_pps_lock(intel_dp, wakeref)
2667                 edp_panel_on(intel_dp);
2668 }
2669
2670
2671 static void edp_panel_off(struct intel_dp *intel_dp)
2672 {
2673         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2674         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2675         u32 pp;
2676         i915_reg_t pp_ctrl_reg;
2677
2678         lockdep_assert_held(&dev_priv->pps_mutex);
2679
2680         if (!intel_dp_is_edp(intel_dp))
2681                 return;
2682
2683         DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2684                       port_name(dig_port->base.port));
2685
2686         WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2687              port_name(dig_port->base.port));
2688
2689         pp = ironlake_get_pp_control(intel_dp);
2690         /* We need to switch off panel power _and_ force vdd, because
2691          * otherwise some panels get very unhappy and cease to work. */
2692         pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2693                 EDP_BLC_ENABLE);
2694
2695         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2696
2697         intel_dp->want_panel_vdd = false;
2698
2699         I915_WRITE(pp_ctrl_reg, pp);
2700         POSTING_READ(pp_ctrl_reg);
2701
2702         wait_panel_off(intel_dp);
2703         intel_dp->panel_power_off_time = ktime_get_boottime();
2704
2705         /* We got a reference when we enabled the VDD. */
2706         intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2707 }
2708
2709 void intel_edp_panel_off(struct intel_dp *intel_dp)
2710 {
2711         intel_wakeref_t wakeref;
2712
2713         if (!intel_dp_is_edp(intel_dp))
2714                 return;
2715
2716         with_pps_lock(intel_dp, wakeref)
2717                 edp_panel_off(intel_dp);
2718 }
2719
2720 /* Enable backlight in the panel power control. */
2721 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2722 {
2723         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2724         intel_wakeref_t wakeref;
2725
2726         /*
2727          * If we enable the backlight right away following a panel power
2728          * on, we may see slight flicker as the panel syncs with the eDP
2729          * link.  So delay a bit to make sure the image is solid before
2730          * allowing it to appear.
2731          */
2732         wait_backlight_on(intel_dp);
2733
2734         with_pps_lock(intel_dp, wakeref) {
2735                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2736                 u32 pp;
2737
2738                 pp = ironlake_get_pp_control(intel_dp);
2739                 pp |= EDP_BLC_ENABLE;
2740
2741                 I915_WRITE(pp_ctrl_reg, pp);
2742                 POSTING_READ(pp_ctrl_reg);
2743         }
2744 }
2745
2746 /* Enable backlight PWM and backlight PP control. */
2747 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2748                             const struct drm_connector_state *conn_state)
2749 {
2750         struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2751
2752         if (!intel_dp_is_edp(intel_dp))
2753                 return;
2754
2755         DRM_DEBUG_KMS("\n");
2756
2757         intel_panel_enable_backlight(crtc_state, conn_state);
2758         _intel_edp_backlight_on(intel_dp);
2759 }
2760
2761 /* Disable backlight in the panel power control. */
2762 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2763 {
2764         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2765         intel_wakeref_t wakeref;
2766
2767         if (!intel_dp_is_edp(intel_dp))
2768                 return;
2769
2770         with_pps_lock(intel_dp, wakeref) {
2771                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2772                 u32 pp;
2773
2774                 pp = ironlake_get_pp_control(intel_dp);
2775                 pp &= ~EDP_BLC_ENABLE;
2776
2777                 I915_WRITE(pp_ctrl_reg, pp);
2778                 POSTING_READ(pp_ctrl_reg);
2779         }
2780
2781         intel_dp->last_backlight_off = jiffies;
2782         edp_wait_backlight_off(intel_dp);
2783 }
2784
2785 /* Disable backlight PP control and backlight PWM. */
2786 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2787 {
2788         struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2789
2790         if (!intel_dp_is_edp(intel_dp))
2791                 return;
2792
2793         DRM_DEBUG_KMS("\n");
2794
2795         _intel_edp_backlight_off(intel_dp);
2796         intel_panel_disable_backlight(old_conn_state);
2797 }
2798
2799 /*
2800  * Hook for controlling the panel power control backlight through the bl_power
2801  * sysfs attribute. Take care to handle multiple calls.
2802  */
2803 static void intel_edp_backlight_power(struct intel_connector *connector,
2804                                       bool enable)
2805 {
2806         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2807         intel_wakeref_t wakeref;
2808         bool is_enabled;
2809
2810         is_enabled = false;
2811         with_pps_lock(intel_dp, wakeref)
2812                 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2813         if (is_enabled == enable)
2814                 return;
2815
2816         DRM_DEBUG_KMS("panel power control backlight %s\n",
2817                       enable ? "enable" : "disable");
2818
2819         if (enable)
2820                 _intel_edp_backlight_on(intel_dp);
2821         else
2822                 _intel_edp_backlight_off(intel_dp);
2823 }
2824
2825 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2826 {
2827         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2828         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2829         bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2830
2831         I915_STATE_WARN(cur_state != state,
2832                         "DP port %c state assertion failure (expected %s, current %s)\n",
2833                         port_name(dig_port->base.port),
2834                         onoff(state), onoff(cur_state));
2835 }
2836 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2837
2838 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2839 {
2840         bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2841
2842         I915_STATE_WARN(cur_state != state,
2843                         "eDP PLL state assertion failure (expected %s, current %s)\n",
2844                         onoff(state), onoff(cur_state));
2845 }
2846 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2847 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2848
2849 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2850                                 const struct intel_crtc_state *pipe_config)
2851 {
2852         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2853         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2854
2855         assert_pipe_disabled(dev_priv, crtc->pipe);
2856         assert_dp_port_disabled(intel_dp);
2857         assert_edp_pll_disabled(dev_priv);
2858
2859         DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2860                       pipe_config->port_clock);
2861
2862         intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2863
2864         if (pipe_config->port_clock == 162000)
2865                 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2866         else
2867                 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2868
2869         I915_WRITE(DP_A, intel_dp->DP);
2870         POSTING_READ(DP_A);
2871         udelay(500);
2872
2873         /*
2874          * [DevILK] Work around required when enabling DP PLL
2875          * while a pipe is enabled going to FDI:
2876          * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2877          * 2. Program DP PLL enable
2878          */
2879         if (IS_GEN(dev_priv, 5))
2880                 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2881
2882         intel_dp->DP |= DP_PLL_ENABLE;
2883
2884         I915_WRITE(DP_A, intel_dp->DP);
2885         POSTING_READ(DP_A);
2886         udelay(200);
2887 }
2888
2889 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2890                                  const struct intel_crtc_state *old_crtc_state)
2891 {
2892         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2893         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2894
2895         assert_pipe_disabled(dev_priv, crtc->pipe);
2896         assert_dp_port_disabled(intel_dp);
2897         assert_edp_pll_enabled(dev_priv);
2898
2899         DRM_DEBUG_KMS("disabling eDP PLL\n");
2900
2901         intel_dp->DP &= ~DP_PLL_ENABLE;
2902
2903         I915_WRITE(DP_A, intel_dp->DP);
2904         POSTING_READ(DP_A);
2905         udelay(200);
2906 }
2907
2908 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2909 {
2910         /*
2911          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2912          * be capable of signalling downstream hpd with a long pulse.
2913          * Whether or not that means D3 is safe to use is not clear,
2914          * but let's assume so until proven otherwise.
2915          *
2916          * FIXME should really check all downstream ports...
2917          */
2918         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2919                 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2920                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2921 }
2922
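     /*
      * Enable/disable DSC decompression in the sink via the DP_DSC_ENABLE
      * DPCD register. No-op unless the state actually enabled compression.
      */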
2923 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2924                                            const struct intel_crtc_state *crtc_state,
2925                                            bool enable)
2926 {
2927         int ret;
2928
2929         if (!crtc_state->dsc_params.compression_enable)
2930                 return;
2931
2932         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2933                                  enable ? DP_DECOMPRESSION_EN : 0);
2934         if (ret < 0)
2935                 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2936                               enable ? "enable" : "disable");
2937 }
2938
2939 /* If the sink supports it, try to set the power state appropriately */
2940 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2941 {
2942         int ret, i;
2943
2944         /* Should have a valid DPCD by this point */
2945         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2946                 return;
2947
2948         if (mode != DRM_MODE_DPMS_ON) {
2949                 if (downstream_hpd_needs_d0(intel_dp))
2950                         return;
2951
2952                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2953                                          DP_SET_POWER_D3);
2954         } else {
2955                 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2956
2957                 /*
2958                  * When turning on, retry the write a few times (sleeping
2959                  * 1 ms between attempts) to give the sink time to wake up.
2960                  */
2961                 for (i = 0; i < 3; i++) {
2962                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2963                                                  DP_SET_POWER_D0);
2964                         if (ret == 1)
2965                                 break;
2966                         msleep(1);
2967                 }
2968
2969                 if (ret == 1 && lspcon->active)
2970                         lspcon_wait_pcon_mode(lspcon);
2971         }
2972
2973         if (ret != 1)
2974                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2975                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2976 }
2977
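     /*
      * On CPT/PPT the pipe <-> port routing lives in the PCH transcoder's
      * TRANS_DP_CTL port select field, so scan the transcoders for the one
      * (if any) currently driving this port.
      */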
2978 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
2979                                  enum port port, enum pipe *pipe)
2980 {
2981         enum pipe p;
2982
2983         for_each_pipe(dev_priv, p) {
2984                 u32 val = I915_READ(TRANS_DP_CTL(p));
2985
2986                 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
2987                         *pipe = p;
2988                         return true;
2989                 }
2990         }
2991
2992         DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
2993
2994         /* must initialize pipe to something for the asserts */
2995         *pipe = PIPE_A;
2996
2997         return false;
2998 }
2999
3000 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3001                            i915_reg_t dp_reg, enum port port,
3002                            enum pipe *pipe)
3003 {
3004         bool ret;
3005         u32 val;
3006
3007         val = I915_READ(dp_reg);
3008
3009         ret = val & DP_PORT_EN;
3010
3011         /* asserts want to know the pipe even if the port is disabled */
3012         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3013                 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3014         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3015                 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3016         else if (IS_CHERRYVIEW(dev_priv))
3017                 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3018         else
3019                 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3020
3021         return ret;
3022 }
3023
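     /*
      * Hardware state readout: report whether the port is enabled and which
      * pipe drives it, grabbing a power reference only if the power domain
      * is already up.
      */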
3024 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3025                                   enum pipe *pipe)
3026 {
3027         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3028         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3029         intel_wakeref_t wakeref;
3030         bool ret;
3031
3032         wakeref = intel_display_power_get_if_enabled(dev_priv,
3033                                                      encoder->power_domain);
3034         if (!wakeref)
3035                 return false;
3036
3037         ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3038                                     encoder->port, pipe);
3039
3040         intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3041
3042         return ret;
3043 }
3044
3045 static void intel_dp_get_config(struct intel_encoder *encoder,
3046                                 struct intel_crtc_state *pipe_config)
3047 {
3048         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3049         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3050         u32 tmp, flags = 0;
3051         enum port port = encoder->port;
3052         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3053
3054         if (encoder->type == INTEL_OUTPUT_EDP)
3055                 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3056         else
3057                 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3058
3059         tmp = I915_READ(intel_dp->output_reg);
3060
3061         pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3062
3063         if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3064                 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3065
3066                 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3067                         flags |= DRM_MODE_FLAG_PHSYNC;
3068                 else
3069                         flags |= DRM_MODE_FLAG_NHSYNC;
3070
3071                 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3072                         flags |= DRM_MODE_FLAG_PVSYNC;
3073                 else
3074                         flags |= DRM_MODE_FLAG_NVSYNC;
3075         } else {
3076                 if (tmp & DP_SYNC_HS_HIGH)
3077                         flags |= DRM_MODE_FLAG_PHSYNC;
3078                 else
3079                         flags |= DRM_MODE_FLAG_NHSYNC;
3080
3081                 if (tmp & DP_SYNC_VS_HIGH)
3082                         flags |= DRM_MODE_FLAG_PVSYNC;
3083                 else
3084                         flags |= DRM_MODE_FLAG_NVSYNC;
3085         }
3086
3087         pipe_config->base.adjusted_mode.flags |= flags;
3088
3089         if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3090                 pipe_config->limited_color_range = true;
3091
3092         pipe_config->lane_count =
3093                 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3094
3095         intel_dp_get_m_n(crtc, pipe_config);
3096
3097         if (port == PORT_A) {
3098                 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3099                         pipe_config->port_clock = 162000;
3100                 else
3101                         pipe_config->port_clock = 270000;
3102         }
3103
3104         pipe_config->base.adjusted_mode.crtc_clock =
3105                 intel_dotclock_calculate(pipe_config->port_clock,
3106                                          &pipe_config->dp_m_n);
3107
3108         if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3109             pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3110                 /*
3111                  * This is a big fat ugly hack.
3112                  *
3113                  * Some machines in UEFI boot mode provide us a VBT that has 18
3114                  * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3115                  * unknown we fail to light up. Yet the same BIOS boots up with
3116                  * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3117                  * max, not what it tells us to use.
3118                  *
3119                  * Note: This will still be broken if the eDP panel is not lit
3120                  * up by the BIOS, and thus we can't get the mode at module
3121                  * load.
3122                  */
3123                 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3124                               pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3125                 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3126         }
3127 }
3128
3129 static void intel_disable_dp(struct intel_encoder *encoder,
3130                              const struct intel_crtc_state *old_crtc_state,
3131                              const struct drm_connector_state *old_conn_state)
3132 {
3133         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3134
3135         intel_dp->link_trained = false;
3136
3137         if (old_crtc_state->has_audio)
3138                 intel_audio_codec_disable(encoder,
3139                                           old_crtc_state, old_conn_state);
3140
3141         /* Make sure the panel is off before trying to change the mode. But also
3142          * ensure that we have vdd while we switch off the panel. */
3143         intel_edp_panel_vdd_on(intel_dp);
3144         intel_edp_backlight_off(old_conn_state);
3145         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3146         intel_edp_panel_off(intel_dp);
3147 }
3148
3149 static void g4x_disable_dp(struct intel_encoder *encoder,
3150                            const struct intel_crtc_state *old_crtc_state,
3151                            const struct drm_connector_state *old_conn_state)
3152 {
3153         intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3154 }
3155
3156 static void vlv_disable_dp(struct intel_encoder *encoder,
3157                            const struct intel_crtc_state *old_crtc_state,
3158                            const struct drm_connector_state *old_conn_state)
3159 {
3160         intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3161 }
3162
3163 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3164                                 const struct intel_crtc_state *old_crtc_state,
3165                                 const struct drm_connector_state *old_conn_state)
3166 {
3167         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3168         enum port port = encoder->port;
3169
3170         /*
3171          * Bspec does not list a specific disable sequence for g4x DP.
3172          * Follow the ilk+ sequence (disable pipe before the port) for
3173          * g4x DP as it does not suffer from underruns like the normal
3174          * g4x modeset sequence (disable pipe after the port).
3175          */
3176         intel_dp_link_down(encoder, old_crtc_state);
3177
3178         /* Only ilk+ has port A */
3179         if (port == PORT_A)
3180                 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3181 }
3182
3183 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3184                                 const struct intel_crtc_state *old_crtc_state,
3185                                 const struct drm_connector_state *old_conn_state)
3186 {
3187         intel_dp_link_down(encoder, old_crtc_state);
3188 }
3189
3190 static void chv_post_disable_dp(struct intel_encoder *encoder,
3191                                 const struct intel_crtc_state *old_crtc_state,
3192                                 const struct drm_connector_state *old_conn_state)
3193 {
3194         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3195
3196         intel_dp_link_down(encoder, old_crtc_state);
3197
3198         vlv_dpio_get(dev_priv);
3199
3200         /* Assert data lane reset */
3201         chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3202
3203         vlv_dpio_put(dev_priv);
3204 }
3205
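     /*
      * Program the requested training pattern into the source: via DP_TP_CTL
      * on DDI platforms, otherwise via the link training bits in the DP port
      * register (CPT-style bits for PCH ports and IVB port A, g4x-style bits
      * everywhere else).
      */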
3206 static void
3207 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3208                          u32 *DP,
3209                          u8 dp_train_pat)
3210 {
3211         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3212         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3213         enum port port = intel_dig_port->base.port;
3214         u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3215
3216         if (dp_train_pat & train_pat_mask)
3217                 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3218                               dp_train_pat & train_pat_mask);
3219
3220         if (HAS_DDI(dev_priv)) {
3221                 u32 temp = I915_READ(DP_TP_CTL(port));
3222
3223                 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3224                         temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3225                 else
3226                         temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3227
3228                 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3229                 switch (dp_train_pat & train_pat_mask) {
3230                 case DP_TRAINING_PATTERN_DISABLE:
3231                         temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3232
3233                         break;
3234                 case DP_TRAINING_PATTERN_1:
3235                         temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3236                         break;
3237                 case DP_TRAINING_PATTERN_2:
3238                         temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3239                         break;
3240                 case DP_TRAINING_PATTERN_3:
3241                         temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3242                         break;
3243                 case DP_TRAINING_PATTERN_4:
3244                         temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3245                         break;
3246                 }
3247                 I915_WRITE(DP_TP_CTL(port), temp);
3248
3249         } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3250                    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3251                 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3252
3253                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3254                 case DP_TRAINING_PATTERN_DISABLE:
3255                         *DP |= DP_LINK_TRAIN_OFF_CPT;
3256                         break;
3257                 case DP_TRAINING_PATTERN_1:
3258                         *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3259                         break;
3260                 case DP_TRAINING_PATTERN_2:
3261                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3262                         break;
3263                 case DP_TRAINING_PATTERN_3:
3264                         DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3265                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3266                         break;
3267                 }
3268
3269         } else {
3270                 *DP &= ~DP_LINK_TRAIN_MASK;
3271
3272                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3273                 case DP_TRAINING_PATTERN_DISABLE:
3274                         *DP |= DP_LINK_TRAIN_OFF;
3275                         break;
3276                 case DP_TRAINING_PATTERN_1:
3277                         *DP |= DP_LINK_TRAIN_PAT_1;
3278                         break;
3279                 case DP_TRAINING_PATTERN_2:
3280                         *DP |= DP_LINK_TRAIN_PAT_2;
3281                         break;
3282                 case DP_TRAINING_PATTERN_3:
3283                         DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3284                         *DP |= DP_LINK_TRAIN_PAT_2;
3285                         break;
3286                 }
3287         }
3288 }
3289
3290 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3291                                  const struct intel_crtc_state *old_crtc_state)
3292 {
3293         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3294
3295         /* enable with pattern 1 (as per spec) */
3296
3297         intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3298
3299         /*
3300          * Magic for VLV/CHV. We _must_ first set up the register
3301          * without actually enabling the port, and then do another
3302          * write to enable the port. Otherwise link training will
3303          * fail when the power sequencer is freshly used for this port.
3304          */
3305         intel_dp->DP |= DP_PORT_EN;
3306         if (old_crtc_state->has_audio)
3307                 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3308
3309         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3310         POSTING_READ(intel_dp->output_reg);
3311 }
3312
3313 static void intel_enable_dp(struct intel_encoder *encoder,
3314                             const struct intel_crtc_state *pipe_config,
3315                             const struct drm_connector_state *conn_state)
3316 {
3317         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3318         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3319         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3320         u32 dp_reg = I915_READ(intel_dp->output_reg);
3321         enum pipe pipe = crtc->pipe;
3322         intel_wakeref_t wakeref;
3323
3324         if (WARN_ON(dp_reg & DP_PORT_EN))
3325                 return;
3326
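             /*
              * With the port still off: claim/init the power sequencer on
              * VLV/CHV, enable the port with training pattern 1, and run the
              * panel power-on sequence, all under the same pps lock.
              */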
3327         with_pps_lock(intel_dp, wakeref) {
3328                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3329                         vlv_init_panel_power_sequencer(encoder, pipe_config);
3330
3331                 intel_dp_enable_port(intel_dp, pipe_config);
3332
3333                 edp_panel_vdd_on(intel_dp);
3334                 edp_panel_on(intel_dp);
3335                 edp_panel_vdd_off(intel_dp, true);
3336         }
3337
3338         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3339                 unsigned int lane_mask = 0x0;
3340
3341                 if (IS_CHERRYVIEW(dev_priv))
3342                         lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3343
3344                 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3345                                     lane_mask);
3346         }
3347
3348         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3349         intel_dp_start_link_train(intel_dp);
3350         intel_dp_stop_link_train(intel_dp);
3351
3352         if (pipe_config->has_audio) {
3353                 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3354                                  pipe_name(pipe));
3355                 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3356         }
3357 }
3358
3359 static void g4x_enable_dp(struct intel_encoder *encoder,
3360                           const struct intel_crtc_state *pipe_config,
3361                           const struct drm_connector_state *conn_state)
3362 {
3363         intel_enable_dp(encoder, pipe_config, conn_state);
3364         intel_edp_backlight_on(pipe_config, conn_state);
3365 }
3366
3367 static void vlv_enable_dp(struct intel_encoder *encoder,
3368                           const struct intel_crtc_state *pipe_config,
3369                           const struct drm_connector_state *conn_state)
3370 {
3371         intel_edp_backlight_on(pipe_config, conn_state);
3372 }
3373
3374 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3375                               const struct intel_crtc_state *pipe_config,
3376                               const struct drm_connector_state *conn_state)
3377 {
3378         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3379         enum port port = encoder->port;
3380
3381         intel_dp_prepare(encoder, pipe_config);
3382
3383         /* Only ilk+ has port A */
3384         if (port == PORT_A)
3385                 ironlake_edp_pll_on(intel_dp, pipe_config);
3386 }
3387
3388 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3389 {
3390         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3391         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3392         enum pipe pipe = intel_dp->pps_pipe;
3393         i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3394
3395         WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3396
3397         if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3398                 return;
3399
3400         edp_panel_vdd_off_sync(intel_dp);
3401
3402         /*
3403          * VLV seems to get confused when multiple power sequencers
3404          * have the same port selected (even if only one has power/vdd
3405          * enabled). The failure manifests as vlv_wait_port_ready() failing.
3406          * CHV on the other hand doesn't seem to mind having the same port
3407          * selected in multiple power sequencers, but let's clear the
3408          * port select always when logically disconnecting a power sequencer
3409          * from a port.
3410          */
3411         DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3412                       pipe_name(pipe), port_name(intel_dig_port->base.port));
3413         I915_WRITE(pp_on_reg, 0);
3414         POSTING_READ(pp_on_reg);
3415
3416         intel_dp->pps_pipe = INVALID_PIPE;
3417 }
3418
3419 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3420                                       enum pipe pipe)
3421 {
3422         struct intel_encoder *encoder;
3423
3424         lockdep_assert_held(&dev_priv->pps_mutex);
3425
3426         for_each_intel_dp(&dev_priv->drm, encoder) {
3427                 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3428                 enum port port = encoder->port;
3429
3430                 WARN(intel_dp->active_pipe == pipe,
3431                      "stealing pipe %c power sequencer from active (e)DP port %c\n",
3432                      pipe_name(pipe), port_name(port));
3433
3434                 if (intel_dp->pps_pipe != pipe)
3435                         continue;
3436
3437                 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3438                               pipe_name(pipe), port_name(port));
3439
3440                 /* make sure vdd is off before we steal it */
3441                 vlv_detach_power_sequencer(intel_dp);
3442         }
3443 }
3444
3445 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3446                                            const struct intel_crtc_state *crtc_state)
3447 {
3448         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3449         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3450         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3451
3452         lockdep_assert_held(&dev_priv->pps_mutex);
3453
3454         WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3455
3456         if (intel_dp->pps_pipe != INVALID_PIPE &&
3457             intel_dp->pps_pipe != crtc->pipe) {
3458                 /*
3459                  * If another power sequencer was being used on this
3460                  * port previously make sure to turn off vdd there while
3461                  * we still have control of it.
3462                  */
3463                 vlv_detach_power_sequencer(intel_dp);
3464         }
3465
3466         /*
3467          * We may be stealing the power
3468          * sequencer from another port.
3469          */
3470         vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3471
3472         intel_dp->active_pipe = crtc->pipe;
3473
3474         if (!intel_dp_is_edp(intel_dp))
3475                 return;
3476
3477         /* now it's all ours */
3478         intel_dp->pps_pipe = crtc->pipe;
3479
3480         DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3481                       pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3482
3483         /* init power sequencer on this pipe and port */
3484         intel_dp_init_panel_power_sequencer(intel_dp);
3485         intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3486 }
3487
3488 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3489                               const struct intel_crtc_state *pipe_config,
3490                               const struct drm_connector_state *conn_state)
3491 {
3492         vlv_phy_pre_encoder_enable(encoder, pipe_config);
3493
3494         intel_enable_dp(encoder, pipe_config, conn_state);
3495 }
3496
3497 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3498                                   const struct intel_crtc_state *pipe_config,
3499                                   const struct drm_connector_state *conn_state)
3500 {
3501         intel_dp_prepare(encoder, pipe_config);
3502
3503         vlv_phy_pre_pll_enable(encoder, pipe_config);
3504 }
3505
3506 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3507                               const struct intel_crtc_state *pipe_config,
3508                               const struct drm_connector_state *conn_state)
3509 {
3510         chv_phy_pre_encoder_enable(encoder, pipe_config);
3511
3512         intel_enable_dp(encoder, pipe_config, conn_state);
3513
3514         /* Second common lane will stay alive on its own now */
3515         chv_phy_release_cl2_override(encoder);
3516 }
3517
3518 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3519                                   const struct intel_crtc_state *pipe_config,
3520                                   const struct drm_connector_state *conn_state)
3521 {
3522         intel_dp_prepare(encoder, pipe_config);
3523
3524         chv_phy_pre_pll_enable(encoder, pipe_config);
3525 }
3526
3527 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3528                                     const struct intel_crtc_state *old_crtc_state,
3529                                     const struct drm_connector_state *old_conn_state)
3530 {
3531         chv_phy_post_pll_disable(encoder, old_crtc_state);
3532 }
3533
3534 /*
3535  * Fetch AUX CH registers 0x202 - 0x207 which contain
3536  * link status information
3537  */
3538 bool
3539 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3540 {
3541         return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3542                                 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3543 }
3544
3545 /* These are source-specific values. */
3546 u8
3547 intel_dp_voltage_max(struct intel_dp *intel_dp)
3548 {
3549         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3550         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3551         enum port port = encoder->port;
3552
3553         if (HAS_DDI(dev_priv))
3554                 return intel_ddi_dp_voltage_max(encoder);
3555         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3556                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3557         else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3558                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3559         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3560                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3561         else
3562                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3563 }
3564
3565 u8
3566 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3567 {
3568         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3569         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3570         enum port port = encoder->port;
3571
3572         if (HAS_DDI(dev_priv)) {
3573                 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3574         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3575                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3576                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3577                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3578                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3579                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3580                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3581                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3582                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3583                 default:
3584                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3585                 }
3586         } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3587                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3588                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3589                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3590                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3591                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3592                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3593                 default:
3594                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3595                 }
3596         } else {
3597                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3598                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3599                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3600                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3601                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3602                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3603                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3604                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3605                 default:
3606                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3607                 }
3608         }
3609 }
3610
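     /*
      * VLV: translate the requested swing/pre-emphasis combination into raw
      * DPIO PHY register values (de-emphasis, pre-emphasis and "unique
      * transition scale"). The magic numbers are presumably the platform
      * tuning tables from Bspec.
      */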
3611 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3612 {
3613         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3614         unsigned long demph_reg_value, preemph_reg_value,
3615                 uniqtranscale_reg_value;
3616         u8 train_set = intel_dp->train_set[0];
3617
3618         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3619         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3620                 preemph_reg_value = 0x0004000;
3621                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3622                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3623                         demph_reg_value = 0x2B405555;
3624                         uniqtranscale_reg_value = 0x552AB83A;
3625                         break;
3626                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3627                         demph_reg_value = 0x2B404040;
3628                         uniqtranscale_reg_value = 0x5548B83A;
3629                         break;
3630                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3631                         demph_reg_value = 0x2B245555;
3632                         uniqtranscale_reg_value = 0x5560B83A;
3633                         break;
3634                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3635                         demph_reg_value = 0x2B405555;
3636                         uniqtranscale_reg_value = 0x5598DA3A;
3637                         break;
3638                 default:
3639                         return 0;
3640                 }
3641                 break;
3642         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3643                 preemph_reg_value = 0x0002000;
3644                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3645                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3646                         demph_reg_value = 0x2B404040;
3647                         uniqtranscale_reg_value = 0x5552B83A;
3648                         break;
3649                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3650                         demph_reg_value = 0x2B404848;
3651                         uniqtranscale_reg_value = 0x5580B83A;
3652                         break;
3653                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3654                         demph_reg_value = 0x2B404040;
3655                         uniqtranscale_reg_value = 0x55ADDA3A;
3656                         break;
3657                 default:
3658                         return 0;
3659                 }
3660                 break;
3661         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3662                 preemph_reg_value = 0x0000000;
3663                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3664                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3665                         demph_reg_value = 0x2B305555;
3666                         uniqtranscale_reg_value = 0x5570B83A;
3667                         break;
3668                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3669                         demph_reg_value = 0x2B2B4040;
3670                         uniqtranscale_reg_value = 0x55ADDA3A;
3671                         break;
3672                 default:
3673                         return 0;
3674                 }
3675                 break;
3676         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3677                 preemph_reg_value = 0x0006000;
3678                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3679                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3680                         demph_reg_value = 0x1B405555;
3681                         uniqtranscale_reg_value = 0x55ADDA3A;
3682                         break;
3683                 default:
3684                         return 0;
3685                 }
3686                 break;
3687         default:
3688                 return 0;
3689         }
3690
3691         vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3692                                  uniqtranscale_reg_value, 0);
3693
3694         return 0;
3695 }
3696
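     /* CHV: same idea as VLV, expressed as de-emphasis/margin values. */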
3697 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3698 {
3699         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3700         u32 deemph_reg_value, margin_reg_value;
3701         bool uniq_trans_scale = false;
3702         u8 train_set = intel_dp->train_set[0];
3703
3704         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3705         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3706                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3707                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3708                         deemph_reg_value = 128;
3709                         margin_reg_value = 52;
3710                         break;
3711                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3712                         deemph_reg_value = 128;
3713                         margin_reg_value = 77;
3714                         break;
3715                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3716                         deemph_reg_value = 128;
3717                         margin_reg_value = 102;
3718                         break;
3719                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3720                         deemph_reg_value = 128;
3721                         margin_reg_value = 154;
3722                         uniq_trans_scale = true;
3723                         break;
3724                 default:
3725                         return 0;
3726                 }
3727                 break;
3728         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3729                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3730                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3731                         deemph_reg_value = 85;
3732                         margin_reg_value = 78;
3733                         break;
3734                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3735                         deemph_reg_value = 85;
3736                         margin_reg_value = 116;
3737                         break;
3738                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3739                         deemph_reg_value = 85;
3740                         margin_reg_value = 154;
3741                         break;
3742                 default:
3743                         return 0;
3744                 }
3745                 break;
3746         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3747                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3748                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3749                         deemph_reg_value = 64;
3750                         margin_reg_value = 104;
3751                         break;
3752                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3753                         deemph_reg_value = 64;
3754                         margin_reg_value = 154;
3755                         break;
3756                 default:
3757                         return 0;
3758                 }
3759                 break;
3760         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3761                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3762                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3763                         deemph_reg_value = 43;
3764                         margin_reg_value = 154;
3765                         break;
3766                 default:
3767                         return 0;
3768                 }
3769                 break;
3770         default:
3771                 return 0;
3772         }
3773
3774         chv_set_phy_signal_level(encoder, deemph_reg_value,
3775                                  margin_reg_value, uniq_trans_scale);
3776
3777         return 0;
3778 }
3779
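     /*
      * g4x: the DP port register encodes voltage swing (0.4-1.2 V) and
      * pre-emphasis (0-9.5 dB) directly.
      */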
3780 static u32
3781 g4x_signal_levels(u8 train_set)
3782 {
3783         u32 signal_levels = 0;
3784
3785         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3786         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3787         default:
3788                 signal_levels |= DP_VOLTAGE_0_4;
3789                 break;
3790         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3791                 signal_levels |= DP_VOLTAGE_0_6;
3792                 break;
3793         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3794                 signal_levels |= DP_VOLTAGE_0_8;
3795                 break;
3796         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3797                 signal_levels |= DP_VOLTAGE_1_2;
3798                 break;
3799         }
3800         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3801         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3802         default:
3803                 signal_levels |= DP_PRE_EMPHASIS_0;
3804                 break;
3805         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3806                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3807                 break;
3808         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3809                 signal_levels |= DP_PRE_EMPHASIS_6;
3810                 break;
3811         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3812                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3813                 break;
3814         }
3815         return signal_levels;
3816 }
3817
3818 /* SNB CPU eDP voltage swing and pre-emphasis control */
3819 static u32
3820 snb_cpu_edp_signal_levels(u8 train_set)
3821 {
3822         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3823                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3824         switch (signal_levels) {
3825         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3826         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3827                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3828         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3829                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3830         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3831         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3832                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3833         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3834         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3835                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3836         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3837         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3838                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3839         default:
3840                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
3841                               signal_levels);
3842                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3843         }
3844 }
3845
3846 /* IVB CPU eDP voltage swing and pre-emphasis control */
3847 static u32
3848 ivb_cpu_edp_signal_levels(u8 train_set)
3849 {
3850         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3851                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3852         switch (signal_levels) {
3853         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3854                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3855         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3856                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3857         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3858                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3859
3860         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3861                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3862         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3863                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3864
3865         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3866                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3867         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3868                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3869
3870         default:
3871                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
3872                               signal_levels);
3873                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3874         }
3875 }
3876
3877 void
3878 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3879 {
3880         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3881         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3882         enum port port = intel_dig_port->base.port;
3883         u32 signal_levels, mask = 0;
3884         u8 train_set = intel_dp->train_set[0];
3885
3886         if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3887                 signal_levels = bxt_signal_levels(intel_dp);
3888         } else if (HAS_DDI(dev_priv)) {
3889                 signal_levels = ddi_signal_levels(intel_dp);
3890                 mask = DDI_BUF_EMP_MASK;
3891         } else if (IS_CHERRYVIEW(dev_priv)) {
3892                 signal_levels = chv_signal_levels(intel_dp);
3893         } else if (IS_VALLEYVIEW(dev_priv)) {
3894                 signal_levels = vlv_signal_levels(intel_dp);
3895         } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3896                 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3897                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3898         } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3899                 signal_levels = snb_cpu_edp_signal_levels(train_set);
3900                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3901         } else {
3902                 signal_levels = g4x_signal_levels(train_set);
3903                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3904         }
3905
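             /*
              * A zero mask means the levels are programmed through the PHY
              * (VLV/CHV/bxt), so there is nothing to merge into the port
              * register here.
              */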
3906         if (mask)
3907                 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3908
3909         DRM_DEBUG_KMS("Using vswing level %d\n",
3910                 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3911         DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3912                 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3913                         DP_TRAIN_PRE_EMPHASIS_SHIFT);
3914
3915         intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3916
3917         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3918         POSTING_READ(intel_dp->output_reg);
3919 }
3920
3921 void
3922 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3923                                        u8 dp_train_pat)
3924 {
3925         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3926         struct drm_i915_private *dev_priv =
3927                 to_i915(intel_dig_port->base.base.dev);
3928
3929         _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3930
3931         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3932         POSTING_READ(intel_dp->output_reg);
3933 }
3934
3935 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3936 {
3937         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3938         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3939         enum port port = intel_dig_port->base.port;
3940         u32 val;
3941
3942         if (!HAS_DDI(dev_priv))
3943                 return;
3944
3945         val = I915_READ(DP_TP_CTL(port));
3946         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3947         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3948         I915_WRITE(DP_TP_CTL(port), val);
3949
3950         /*
3951          * On PORT_A we can have only eDP in SST mode. There, the only reason
3952          * we need to set idle transmission mode is to work around a HW issue
3953          * where we enable the pipe while not in idle link-training mode.
3954          * In this case there is a requirement to wait for a minimum number of
3955          * idle patterns to be sent.
3956          */
3957         if (port == PORT_A)
3958                 return;
3959
3960         if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
3961                                     DP_TP_STATUS_IDLE_DONE,
3962                                     DP_TP_STATUS_IDLE_DONE,
3963                                     1))
3964                 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3965 }
3966
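     /*
      * Take the link down: switch to the idle training pattern, then clear
      * DP_PORT_EN and audio, applying the IBX transcoder A workaround where
      * needed.
      */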
3967 static void
3968 intel_dp_link_down(struct intel_encoder *encoder,
3969                    const struct intel_crtc_state *old_crtc_state)
3970 {
3971         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3972         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3973         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3974         enum port port = encoder->port;
3975         u32 DP = intel_dp->DP;
3976
3977         if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3978                 return;
3979
3980         DRM_DEBUG_KMS("\n");
3981
3982         if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3983             (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3984                 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3985                 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3986         } else {
3987                 DP &= ~DP_LINK_TRAIN_MASK;
3988                 DP |= DP_LINK_TRAIN_PAT_IDLE;
3989         }
3990         I915_WRITE(intel_dp->output_reg, DP);
3991         POSTING_READ(intel_dp->output_reg);
3992
3993         DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3994         I915_WRITE(intel_dp->output_reg, DP);
3995         POSTING_READ(intel_dp->output_reg);
3996
3997         /*
3998          * HW workaround for IBX, we need to move the port
3999          * to transcoder A after disabling it to allow the
4000          * matching HDMI port to be enabled on transcoder A.
4001          */
4002         if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
4003                 /*
4004                  * We get CPU/PCH FIFO underruns on the other pipe when
4005                  * doing the workaround. Sweep them under the rug.
4006                  */
4007                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4008                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4009
4010                 /* always enable with pattern 1 (as per spec) */
4011                 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4012                 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4013                         DP_LINK_TRAIN_PAT_1;
4014                 I915_WRITE(intel_dp->output_reg, DP);
4015                 POSTING_READ(intel_dp->output_reg);
4016
4017                 DP &= ~DP_PORT_EN;
4018                 I915_WRITE(intel_dp->output_reg, DP);
4019                 POSTING_READ(intel_dp->output_reg);
4020
4021                 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4022                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4023                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4024         }
4025
4026         msleep(intel_dp->panel_power_down_delay);
4027
4028         intel_dp->DP = DP;
4029
4030         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4031                 intel_wakeref_t wakeref;
4032
4033                 with_pps_lock(intel_dp, wakeref)
4034                         intel_dp->active_pipe = INVALID_PIPE;
4035         }
4036 }
4037
4038 static void
4039 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4040 {
4041         u8 dpcd_ext[6];
4042
4043         /*
4044          * Prior to DP1.3 the bit represented by
4045          * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4046          * If it is set, DP_DPCD_REV at 0000h could be at a value less than
4047          * the true capability of the panel. The only way to check is to
4048          * then compare 0000h and 2200h.
4049          */
4050         if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4051               DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4052                 return;
4053
4054         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4055                              dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4056                 DRM_ERROR("DPCD failed read at extended capabilities\n");
4057                 return;
4058         }
4059
4060         if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4061                 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4062                 return;
4063         }
4064
4065         if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4066                 return;
4067
4068         DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4069                       (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4070
4071         memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4072 }
4073
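     /*
      * Read the sink's base DPCD receiver caps, preferring the extended
      * capability field at 2200h when the sink advertises it.
      */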
4074 bool
4075 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4076 {
4077         if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4078                              sizeof(intel_dp->dpcd)) < 0)
4079                 return false; /* aux transfer failed */
4080
4081         intel_dp_extended_receiver_capabilities(intel_dp);
4082
4083         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4084
4085         return intel_dp->dpcd[DP_DPCD_REV] != 0;
4086 }
4087
4088 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4089 {
4090         u8 dprx = 0;
4091
4092         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4093                               &dprx) != 1)
4094                 return false;
4095         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4096 }
4097
4098 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4099 {
4100         /*
4101          * Clear the cached register set to avoid using stale values
4102          * for the sinks that do not support DSC.
4103          */
4104         memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4105
4106         /* Clear fec_capable to avoid using stale values */
4107         intel_dp->fec_capable = 0;
4108
4109         /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4110         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4111             intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4112                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4113                                      intel_dp->dsc_dpcd,
4114                                      sizeof(intel_dp->dsc_dpcd)) < 0)
4115                         DRM_ERROR("Failed to read DPCD register 0x%x\n",
4116                                   DP_DSC_SUPPORT);
4117
4118                 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4119                               (int)sizeof(intel_dp->dsc_dpcd),
4120                               intel_dp->dsc_dpcd);
4121
4122                 /* FEC is supported only on DP 1.4 */
4123                 if (!intel_dp_is_edp(intel_dp) &&
4124                     drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4125                                       &intel_dp->fec_capable) < 0)
4126                         DRM_ERROR("Failed to read FEC DPCD register\n");
4127
4128                 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4129         }
4130 }
4131
4132 static bool
4133 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4134 {
4135         struct drm_i915_private *dev_priv =
4136                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4137
4138         /* this function is meant to be called only once */
4139         WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4140
4141         if (!intel_dp_read_dpcd(intel_dp))
4142                 return false;
4143
4144         drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4145                          drm_dp_is_branch(intel_dp->dpcd));
4146
4147         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4148                 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4149                         DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4150
4151         /*
4152          * Read the eDP display control registers.
4153          *
4154          * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4155          * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4156          * set, but require eDP 1.4+ detection (e.g. for supported link rates
4157          * method). The display control registers should read zero if they're
4158          * not supported anyway.
4159          */
4160         if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4161                              intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4162                              sizeof(intel_dp->edp_dpcd))
4163                 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4164                               intel_dp->edp_dpcd);
4165
4166         /*
4167          * This has to be called after intel_dp->edp_dpcd is filled; PSR checks
4168          * for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
4169          */
4170         intel_psr_init_dpcd(intel_dp);
4171
4172         /* Read the eDP 1.4+ supported link rates. */
4173         if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4174                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4175                 int i;
4176
4177                 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4178                                 sink_rates, sizeof(sink_rates));
4179
4180                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4181                         int val = le16_to_cpu(sink_rates[i]);
4182
4183                         if (val == 0)
4184                                 break;
4185
4186                         /* Value read multiplied by 200kHz gives the per-lane
4187                          * link rate in kHz. The source rates are, however,
4188                          * stored in terms of LS_Clk kHz. The full conversion
4189                          * back to symbols is
4190                          * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4191                          */
4192                         intel_dp->sink_rates[i] = (val * 200) / 10;
4193                 }
4194                 intel_dp->num_sink_rates = i;
4195         }
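        /*
         * Worked example for the conversion above: HBR3 (8.1 Gbps per
         * lane) is reported in DP_SUPPORTED_LINK_RATES as 0x9E34 = 40500,
         * i.e. 40500 * 200 kHz = 8,100,000 kHz of bit rate. Applying
         * (8/10 ch. encoding) * (1/8 bit to Byte) = 1/10 gives
         * (40500 * 200) / 10 = 810000 kHz LS_Clk, the value stored in
         * sink_rates[].
         */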
4196
4197         /*
4198          * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4199          * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4200          */
4201         if (intel_dp->num_sink_rates)
4202                 intel_dp->use_rate_select = true;
4203         else
4204                 intel_dp_set_sink_rates(intel_dp);
4205
4206         intel_dp_set_common_rates(intel_dp);
4207
4208         /* Read the eDP DSC DPCD registers */
4209         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4210                 intel_dp_get_dsc_sink_cap(intel_dp);
4211
4212         return true;
4213 }
4214
4215
4216 static bool
4217 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4218 {
4219         if (!intel_dp_read_dpcd(intel_dp))
4220                 return false;
4221
4222         /* Don't clobber cached eDP rates. */
4223         if (!intel_dp_is_edp(intel_dp)) {
4224                 intel_dp_set_sink_rates(intel_dp);
4225                 intel_dp_set_common_rates(intel_dp);
4226         }
4227
4228         /*
4229          * Some eDP panels do not set a valid value for sink count, which is why
4230          * we don't bother reading it here (nor in intel_edp_init_dpcd()).
4231          */
4232         if (!intel_dp_is_edp(intel_dp)) {
4233                 u8 count;
4234                 ssize_t r;
4235
4236                 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4237                 if (r < 1)
4238                         return false;
4239
4240                 /*
4241                  * Sink count can change between short pulse HPD interrupts,
4242                  * hence a member variable in intel_dp tracks any changes
4243                  * between them.
4244                  */
4245                 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
4246
4247                 /*
4248                  * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4249                  * a dongle is present but no display. Unless we need to know
4250                  * whether a dongle is present, there is no point updating the
4251                  * downstream port information, so an early return here avoids
4252                  * doing work that isn't required.
4253                  */
4254                 if (!intel_dp->sink_count)
4255                         return false;
4256         }
4257
4258         if (!drm_dp_is_branch(intel_dp->dpcd))
4259                 return true; /* native DP sink */
4260
4261         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4262                 return true; /* no per-port downstream info */
4263
4264         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4265                              intel_dp->downstream_ports,
4266                              DP_MAX_DOWNSTREAM_PORTS) < 0)
4267                 return false; /* downstream port status fetch failed */
4268
4269         return true;
4270 }
4271
4272 static bool
4273 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4274 {
4275         u8 mstm_cap;
4276
4277         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4278                 return false;
4279
4280         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4281                 return false;
4282
4283         return mstm_cap & DP_MST_CAP;
4284 }
4285
4286 static bool
4287 intel_dp_can_mst(struct intel_dp *intel_dp)
4288 {
4289         return i915_modparams.enable_dp_mst &&
4290                 intel_dp->can_mst &&
4291                 intel_dp_sink_can_mst(intel_dp);
4292 }
4293
4294 static void
4295 intel_dp_configure_mst(struct intel_dp *intel_dp)
4296 {
4297         struct intel_encoder *encoder =
4298                 &dp_to_dig_port(intel_dp)->base;
4299         bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4300
4301         DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4302                       port_name(encoder->port), yesno(intel_dp->can_mst),
4303                       yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4304
4305         if (!intel_dp->can_mst)
4306                 return;
4307
4308         intel_dp->is_mst = sink_can_mst &&
4309                 i915_modparams.enable_dp_mst;
4310
4311         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4312                                         intel_dp->is_mst);
4313 }
4314
4315 static bool
4316 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4317 {
4318         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4319                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4320                 DP_DPRX_ESI_LEN;
4321 }
4322
4323 u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4324                                 int mode_clock, int mode_hdisplay)
4325 {
4326         u16 bits_per_pixel, max_bpp_small_joiner_ram;
4327         int i;
4328
4329         /*
4330          * Available Link Bandwidth (Kbits/sec) = NumberOfLanes *
4331          * LinkSymbolClock * 8 * ((100 - FECOverhead) / 100) * TimeSlotsPerMTP
4332          * FECOverhead = 2.4%; for SST TimeSlotsPerMTP is 1,
4333          * for MST TimeSlotsPerMTP has to be calculated.
4334          */
4335         bits_per_pixel = (link_clock * lane_count * 8 *
4336                           DP_DSC_FEC_OVERHEAD_FACTOR) /
4337                 mode_clock;
4338
4339         /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4340         max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4341                 mode_hdisplay;
4342
4343         /*
4344          * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
4345          * check, output bpp from small joiner RAM check)
4346          */
4347         bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
4348
4349         /* Error out if the max bpp is less than smallest allowed valid bpp */
4350         if (bits_per_pixel < valid_dsc_bpp[0]) {
4351                 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4352                 return 0;
4353         }
4354
4355         /* Find the nearest match in the array of known BPPs from VESA */
4356         for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4357                 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4358                         break;
4359         }
4360         bits_per_pixel = valid_dsc_bpp[i];
4361
4362         /*
4363          * Compressed BPP is in U6.4 format, so multiply by 16; on Gen 11
4364          * the fractional part is 0.
4365          */
4366         return bits_per_pixel << 4;
4367 }
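/*
 * Worked example (hypothetical 4k mode): for mode_hdisplay = 3840 the
 * small joiner limit is 61440 / 3840 = 16 bpp. The loop then picks the
 * largest entry in valid_dsc_bpp[] (defined earlier in this file) that
 * does not exceed that value, and the result is returned in U6.4 format,
 * e.g. 15 bpp -> 15 << 4 = 240. Note that the link bandwidth term keeps
 * the raw 976 overhead factor, so for realistic link rates it is
 * numerically much larger than the joiner term, and the min() above is
 * effectively decided by the small joiner RAM check.
 */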
4368
4369 u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4370                                 int mode_clock,
4371                                 int mode_hdisplay)
4372 {
4373         u8 min_slice_count, i;
4374         int max_slice_width;
4375
4376         if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4377                 min_slice_count = DIV_ROUND_UP(mode_clock,
4378                                                DP_DSC_MAX_ENC_THROUGHPUT_0);
4379         else
4380                 min_slice_count = DIV_ROUND_UP(mode_clock,
4381                                                DP_DSC_MAX_ENC_THROUGHPUT_1);
4382
4383         max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4384         if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4385                 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4386                               max_slice_width);
4387                 return 0;
4388         }
4389         /* Also take into account max slice width */
4390         min_slice_count = min_t(u8, min_slice_count,
4391                                 DIV_ROUND_UP(mode_hdisplay,
4392                                              max_slice_width));
4393
4394         /* Find the closest match to the valid slice count values */
4395         for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4396                 if (valid_dsc_slicecount[i] >
4397                     drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4398                                                     false))
4399                         break;
4400                 if (min_slice_count <= valid_dsc_slicecount[i])
4401                         return valid_dsc_slicecount[i];
4402         }
4403
4404         DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4405         return 0;
4406 }
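/*
 * Worked example (hypothetical 4k mode): mode_clock = 533250 kHz is below
 * DP_DSC_PEAK_PIXEL_RATE, so min_slice_count = DIV_ROUND_UP(533250,
 * 340000) = 2. If the sink reports a maximum slice width of, say, 2560
 * pixels, DIV_ROUND_UP(3840, 2560) = 2 keeps the minimum at 2 slices,
 * and the loop returns 2 assuming the sink supports two slices.
 */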
4407
4408 static void
4409 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
4410                                const struct intel_crtc_state *crtc_state)
4411 {
4412         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4413         struct dp_sdp vsc_sdp = {};
4414
4415         /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
4416         vsc_sdp.sdp_header.HB0 = 0;
4417         vsc_sdp.sdp_header.HB1 = 0x7;
4418
4419         /*
4420          * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
4421          * Colorimetry Format indication.
4422          */
4423         vsc_sdp.sdp_header.HB2 = 0x5;
4424
4425         /*
4426          * HB3: number of valid data bytes for the HB2 = 05h revision above
4427          * (13h = 19 bytes, DB0 through DB18).
4428          */
4429         vsc_sdp.sdp_header.HB3 = 0x13;
4430
4431         /*
4432          * Pixel Encoding in DB16[7:4]: YCbCr 420 = 3h; Colorimetry Format
4433          * in DB16[3:0]: ITU-R BT.601 = 0h, ITU-R BT.709 = 1h (DP 1.4a spec, Table 2-120).
4434          */
4435         vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 */
4436         /* RGB->YCBCR color conversion uses the BT.709 color space. */
4437         vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
4438
4439         /*
4440          * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
4441          * the following Component Bit Depth values are defined:
4442          * 001b = 8bpc.
4443          * 010b = 10bpc.
4444          * 011b = 12bpc.
4445          * 100b = 16bpc.
4446          */
4447         switch (crtc_state->pipe_bpp) {
4448         case 24: /* 8bpc */
4449                 vsc_sdp.db[17] = 0x1;
4450                 break;
4451         case 30: /* 10bpc */
4452                 vsc_sdp.db[17] = 0x2;
4453                 break;
4454         case 36: /* 12bpc */
4455                 vsc_sdp.db[17] = 0x3;
4456                 break;
4457         case 48: /* 16bpc */
4458                 vsc_sdp.db[17] = 0x4;
4459                 break;
4460         default:
4461                 MISSING_CASE(crtc_state->pipe_bpp);
4462                 break;
4463         }
4464
4465         /*
4466          * Dynamic Range (Bit 7)
4467          * 0 = VESA range, 1 = CTA range.
4468          * All YCbCr formats are always limited range.
4469          */
4470         vsc_sdp.db[17] |= 0x80;
4471
4472         /*
4473          * Content Type (Bits 2:0)
4474          * 000b = Not defined.
4475          * 001b = Graphics.
4476          * 010b = Photo.
4477          * 011b = Video.
4478          * 100b = Game
4479          * All other values are RESERVED.
4480          * Note: See CTA-861-G for the definition and expected
4481          * processing by a stream sink for the above content types.
4482          */
4483         vsc_sdp.db[18] = 0;
4484
4485         intel_dig_port->write_infoframe(&intel_dig_port->base,
4486                         crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
4487 }
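/*
 * For reference, the SDP assembled above for a 10 bpc YCbCr 4:2:0 frame
 * carries header bytes 00h 07h 05h 13h, DB16 = 0x31 (YCbCr 420, BT.709),
 * DB17 = 0x82 (10 bpc | CTA range) and DB18 = 0 (content type not
 * defined).
 */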
4488
4489 void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4490                                const struct intel_crtc_state *crtc_state)
4491 {
4492         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4493                 return;
4494
4495         intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
4496 }
4497
4498 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4499 {
4500         int status = 0;
4501         int test_link_rate;
4502         u8 test_lane_count, test_link_bw;
4503         /*
4504          * DP CTS 1.2, section 4.3.1.11
4505          */
4506         /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4507         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4508                                    &test_lane_count);
4509
4510         if (status <= 0) {
4511                 DRM_DEBUG_KMS("Lane count read failed\n");
4512                 return DP_TEST_NAK;
4513         }
4514         test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4515
4516         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4517                                    &test_link_bw);
4518         if (status <= 0) {
4519                 DRM_DEBUG_KMS("Link Rate read failed\n");
4520                 return DP_TEST_NAK;
4521         }
4522         test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4523
4524         /* Validate the requested link rate and lane count */
4525         if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4526                                         test_lane_count))
4527                 return DP_TEST_NAK;
4528
4529         intel_dp->compliance.test_lane_count = test_lane_count;
4530         intel_dp->compliance.test_link_rate = test_link_rate;
4531
4532         return DP_TEST_ACK;
4533 }
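/*
 * Example: a compliance sink requesting 4 lanes at HBR2 sets
 * DP_TEST_LANE_COUNT to 4 and DP_TEST_LINK_RATE to the bandwidth code
 * 0x14, which drm_dp_bw_code_to_link_rate() turns into 540000 kHz; the
 * pair is then validated against this source's own capabilities before
 * being cached for the test modeset.
 */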
4534
4535 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4536 {
4537         u8 test_pattern;
4538         u8 test_misc;
4539         __be16 h_width, v_height;
4540         int status = 0;
4541
4542         /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4543         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4544                                    &test_pattern);
4545         if (status <= 0) {
4546                 DRM_DEBUG_KMS("Test pattern read failed\n");
4547                 return DP_TEST_NAK;
4548         }
4549         if (test_pattern != DP_COLOR_RAMP)
4550                 return DP_TEST_NAK;
4551
4552         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4553                                   &h_width, 2);
4554         if (status <= 0) {
4555                 DRM_DEBUG_KMS("H Width read failed\n");
4556                 return DP_TEST_NAK;
4557         }
4558
4559         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4560                                   &v_height, 2);
4561         if (status <= 0) {
4562                 DRM_DEBUG_KMS("V Height read failed\n");
4563                 return DP_TEST_NAK;
4564         }
4565
4566         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4567                                    &test_misc);
4568         if (status <= 0) {
4569                 DRM_DEBUG_KMS("TEST MISC read failed\n");
4570                 return DP_TEST_NAK;
4571         }
4572         if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4573                 return DP_TEST_NAK;
4574         if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4575                 return DP_TEST_NAK;
4576         switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4577         case DP_TEST_BIT_DEPTH_6:
4578                 intel_dp->compliance.test_data.bpc = 6;
4579                 break;
4580         case DP_TEST_BIT_DEPTH_8:
4581                 intel_dp->compliance.test_data.bpc = 8;
4582                 break;
4583         default:
4584                 return DP_TEST_NAK;
4585         }
4586
4587         intel_dp->compliance.test_data.video_pattern = test_pattern;
4588         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4589         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4590         /* Set test active flag here so userspace doesn't interrupt things */
4591         intel_dp->compliance.test_active = 1;
4592
4593         return DP_TEST_ACK;
4594 }
4595
4596 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4597 {
4598         u8 test_result = DP_TEST_ACK;
4599         struct intel_connector *intel_connector = intel_dp->attached_connector;
4600         struct drm_connector *connector = &intel_connector->base;
4601
4602         if (intel_connector->detect_edid == NULL ||
4603             connector->edid_corrupt ||
4604             intel_dp->aux.i2c_defer_count > 6) {
4605                 /* Check EDID read for NACKs, DEFERs and corruption
4606                  * (DP CTS 1.2 Core r1.1)
4607                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4608                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4609                  *    4.2.2.6 : EDID corruption detected
4610                  * Use failsafe mode for all cases
4611                  */
4612                 if (intel_dp->aux.i2c_nack_count > 0 ||
4613                         intel_dp->aux.i2c_defer_count > 0)
4614                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4615                                       intel_dp->aux.i2c_nack_count,
4616                                       intel_dp->aux.i2c_defer_count);
4617                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4618         } else {
4619                 struct edid *block = intel_connector->detect_edid;
4620
4621                 /* We have to write the checksum
4622                  * of the last block read
4623                  */
4624                 block += intel_connector->detect_edid->extensions;
4625
4626                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4627                                        block->checksum) <= 0)
4628                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4629
4630                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4631                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4632         }
4633
4634         /* Set test active flag here so userspace doesn't interrupt things */
4635         intel_dp->compliance.test_active = 1;
4636
4637         return test_result;
4638 }
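/*
 * Note on the checksum write above: struct edid is 128 bytes, so
 * "block += extensions" steps to the last 128-byte block of the EDID
 * (e.g. with one CEA extension it points at that extension), and its
 * final byte, block->checksum, is what gets written to
 * DP_TEST_EDID_CHECKSUM.
 */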
4639
4640 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4641 {
4642         u8 test_result = DP_TEST_NAK;
4643         return test_result;
4644 }
4645
4646 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4647 {
4648         u8 response = DP_TEST_NAK;
4649         u8 request = 0;
4650         int status;
4651
4652         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4653         if (status <= 0) {
4654                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4655                 goto update_status;
4656         }
4657
4658         switch (request) {
4659         case DP_TEST_LINK_TRAINING:
4660                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4661                 response = intel_dp_autotest_link_training(intel_dp);
4662                 break;
4663         case DP_TEST_LINK_VIDEO_PATTERN:
4664                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4665                 response = intel_dp_autotest_video_pattern(intel_dp);
4666                 break;
4667         case DP_TEST_LINK_EDID_READ:
4668                 DRM_DEBUG_KMS("EDID test requested\n");
4669                 response = intel_dp_autotest_edid(intel_dp);
4670                 break;
4671         case DP_TEST_LINK_PHY_TEST_PATTERN:
4672                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4673                 response = intel_dp_autotest_phy_pattern(intel_dp);
4674                 break;
4675         default:
4676                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4677                 break;
4678         }
4679
4680         if (response & DP_TEST_ACK)
4681                 intel_dp->compliance.test_type = request;
4682
4683 update_status:
4684         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4685         if (status <= 0)
4686                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4687 }
4688
4689 static int
4690 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4691 {
4692         bool bret;
4693
4694         if (intel_dp->is_mst) {
4695                 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4696                 int ret = 0;
4697                 int retry;
4698                 bool handled;
4699
4700                 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4701                 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4702 go_again:
4703                 if (bret) {
4704
4705                         /* check link status - esi[10] = 0x200c */
4706                         if (intel_dp->active_mst_links > 0 &&
4707                             !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4708                                 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4709                                 intel_dp_start_link_train(intel_dp);
4710                                 intel_dp_stop_link_train(intel_dp);
4711                         }
4712
4713                         DRM_DEBUG_KMS("got esi %3ph\n", esi);
4714                         ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4715
4716                         if (handled) {
4717                                 for (retry = 0; retry < 3; retry++) {
4718                                         int wret;
4719                                         wret = drm_dp_dpcd_write(&intel_dp->aux,
4720                                                                  DP_SINK_COUNT_ESI+1,
4721                                                                  &esi[1], 3);
4722                                         if (wret == 3) {
4723                                                 break;
4724                                         }
4725                                 }
4726
4727                                 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4728                 if (bret) {
4729                                         DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4730                                         goto go_again;
4731                                 }
4732                         } else
4733                                 ret = 0;
4734
4735                         return ret;
4736                 } else {
4737                         DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4738                         intel_dp->is_mst = false;
4739                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4740                                                         intel_dp->is_mst);
4741                 }
4742         }
4743         return -EINVAL;
4744 }
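/*
 * The esi[] indexing above follows the DPCD event status indicator
 * block: the read starts at DP_SINK_COUNT_ESI (0x2002), so esi[10] is
 * DPCD 0x200c (DP_LANE0_1_STATUS_ESI), and the ack writes esi[1..3] back
 * to 0x2003 (DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0) and the two bytes that
 * follow it.
 */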
4745
4746 static bool
4747 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4748 {
4749         u8 link_status[DP_LINK_STATUS_SIZE];
4750
4751         if (!intel_dp->link_trained)
4752                 return false;
4753
4754         /*
4755          * While the PSR source HW is enabled it controls the main link,
4756          * enabling and disabling frame transmission, so attempting a retrain
4757          * here could fail: the link may not be on, or training patterns
4758          * could get mixed with frame data, either of which breaks retraining.
4759          * Also, when exiting PSR the HW retrains the link anyway, fixing
4760          * any link status error.
4761          */
4762         if (intel_psr_enabled(intel_dp))
4763                 return false;
4764
4765         if (!intel_dp_get_link_status(intel_dp, link_status))
4766                 return false;
4767
4768         /*
4769          * Validate the cached values of intel_dp->link_rate and
4770          * intel_dp->lane_count before attempting to retrain.
4771          */
4772         if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4773                                         intel_dp->lane_count))
4774                 return false;
4775
4776         /* Retrain if Channel EQ or CR not ok */
4777         return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4778 }
4779
4780 int intel_dp_retrain_link(struct intel_encoder *encoder,
4781                           struct drm_modeset_acquire_ctx *ctx)
4782 {
4783         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4784         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4785         struct intel_connector *connector = intel_dp->attached_connector;
4786         struct drm_connector_state *conn_state;
4787         struct intel_crtc_state *crtc_state;
4788         struct intel_crtc *crtc;
4789         int ret;
4790
4791         /* FIXME handle the MST connectors as well */
4792
4793         if (!connector || connector->base.status != connector_status_connected)
4794                 return 0;
4795
4796         ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4797                                ctx);
4798         if (ret)
4799                 return ret;
4800
4801         conn_state = connector->base.state;
4802
4803         crtc = to_intel_crtc(conn_state->crtc);
4804         if (!crtc)
4805                 return 0;
4806
4807         ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4808         if (ret)
4809                 return ret;
4810
4811         crtc_state = to_intel_crtc_state(crtc->base.state);
4812
4813         WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4814
4815         if (!crtc_state->base.active)
4816                 return 0;
4817
4818         if (conn_state->commit &&
4819             !try_wait_for_completion(&conn_state->commit->hw_done))
4820                 return 0;
4821
4822         if (!intel_dp_needs_link_retrain(intel_dp))
4823                 return 0;
4824
4825         /* Suppress underruns caused by re-training */
4826         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4827         if (crtc_state->has_pch_encoder)
4828                 intel_set_pch_fifo_underrun_reporting(dev_priv,
4829                                                       intel_crtc_pch_transcoder(crtc), false);
4830
4831         intel_dp_start_link_train(intel_dp);
4832         intel_dp_stop_link_train(intel_dp);
4833
4834         /* Keep underrun reporting disabled until things are stable */
4835         intel_wait_for_vblank(dev_priv, crtc->pipe);
4836
4837         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4838         if (crtc_state->has_pch_encoder)
4839                 intel_set_pch_fifo_underrun_reporting(dev_priv,
4840                                                       intel_crtc_pch_transcoder(crtc), true);
4841
4842         return 0;
4843 }
4844
4845 /*
4846  * If the display is now connected, check link status;
4847  * there have been known issues of link loss triggering
4848  * a long pulse.
4849  *
4850  * Some sinks (eg. ASUS PB287Q) seem to perform some
4851  * weird HPD ping pong during modesets. So we can apparently
4852  * end up with HPD going low during a modeset, and then
4853  * going back up soon after. And once that happens we must
4854  * retrain the link to get a picture, in case no userspace
4855  * component reacted to the intermittent HPD dip.
4856  */
4857 static bool intel_dp_hotplug(struct intel_encoder *encoder,
4858                              struct intel_connector *connector)
4859 {
4860         struct drm_modeset_acquire_ctx ctx;
4861         bool changed;
4862         int ret;
4863
4864         changed = intel_encoder_hotplug(encoder, connector);
4865
4866         drm_modeset_acquire_init(&ctx, 0);
4867
4868         for (;;) {
4869                 ret = intel_dp_retrain_link(encoder, &ctx);
4870
4871                 if (ret == -EDEADLK) {
4872                         drm_modeset_backoff(&ctx);
4873                         continue;
4874                 }
4875
4876                 break;
4877         }
4878
4879         drm_modeset_drop_locks(&ctx);
4880         drm_modeset_acquire_fini(&ctx);
4881         WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4882
4883         return changed;
4884 }
4885
4886 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4887 {
4888         u8 val;
4889
4890         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4891                 return;
4892
4893         if (drm_dp_dpcd_readb(&intel_dp->aux,
4894                               DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4895                 return;
4896
4897         drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4898
4899         if (val & DP_AUTOMATED_TEST_REQUEST)
4900                 intel_dp_handle_test_request(intel_dp);
4901
4902         if (val & DP_CP_IRQ)
4903                 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4904
4905         if (val & DP_SINK_SPECIFIC_IRQ)
4906                 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4907 }
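/*
 * The write of val back to DP_DEVICE_SERVICE_IRQ_VECTOR above acks the
 * interrupt: per the DP spec these bits are write-1-to-clear, so e.g. a
 * sink asserting DP_AUTOMATED_TEST_REQUEST (bit 1) has that bit cleared
 * before the test request is dispatched.
 */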
4908
4909 /*
4910  * According to DP spec
4911  * 5.1.2:
4912  *  1. Read DPCD
4913  *  2. Configure link according to Receiver Capabilities
4914  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4915  *  4. Check link status on receipt of hot-plug interrupt
4916  *
4917  * intel_dp_short_pulse - handles short pulse interrupts
4918  * when full detection is not required.
4919  * Returns %true if the short pulse is handled and full detection
4920  * is NOT required, %false otherwise.
4921  */
4922 static bool
4923 intel_dp_short_pulse(struct intel_dp *intel_dp)
4924 {
4925         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4926         u8 old_sink_count = intel_dp->sink_count;
4927         bool ret;
4928
4929         /*
4930          * Clearing compliance test variables to allow capturing
4931          * of values for next automated test request.
4932          */
4933         memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4934
4935         /*
4936          * Now read the DPCD to see if it's actually running.
4937          * If the current value of sink count doesn't match the value
4938          * that was stored earlier, or if the DPCD read failed, we
4939          * need to do full detection.
4940          */
4941         ret = intel_dp_get_dpcd(intel_dp);
4942
4943         if ((old_sink_count != intel_dp->sink_count) || !ret) {
4944                 /* No need to proceed if we are going to do full detect */
4945                 return false;
4946         }
4947
4948         intel_dp_check_service_irq(intel_dp);
4949
4950         /* Handle CEC interrupts, if any */
4951         drm_dp_cec_irq(&intel_dp->aux);
4952
4953         /* defer to the hotplug work for link retraining if needed */
4954         if (intel_dp_needs_link_retrain(intel_dp))
4955                 return false;
4956
4957         intel_psr_short_pulse(intel_dp);
4958
4959         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4960                 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4961                 /* Send a Hotplug Uevent to userspace to start modeset */
4962                 drm_kms_helper_hotplug_event(&dev_priv->drm);
4963         }
4964
4965         return true;
4966 }
4967
4968 /* XXX this is probably wrong for multiple downstream ports */
4969 static enum drm_connector_status
4970 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4971 {
4972         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4973         u8 *dpcd = intel_dp->dpcd;
4974         u8 type;
4975
4976         if (WARN_ON(intel_dp_is_edp(intel_dp)))
4977                 return connector_status_connected;
4978
4979         if (lspcon->active)
4980                 lspcon_resume(lspcon);
4981
4982         if (!intel_dp_get_dpcd(intel_dp))
4983                 return connector_status_disconnected;
4984
4985         /* if there's no downstream port, we're done */
4986         if (!drm_dp_is_branch(dpcd))
4987                 return connector_status_connected;
4988
4989         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4990         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4991             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4993                 return intel_dp->sink_count ?
4994                         connector_status_connected : connector_status_disconnected;
4995         }
4996
4997         if (intel_dp_can_mst(intel_dp))
4998                 return connector_status_connected;
4999
5000         /* If no HPD, poke DDC gently */
5001         if (drm_probe_ddc(&intel_dp->aux.ddc))
5002                 return connector_status_connected;
5003
5004         /* Well we tried, say unknown for unreliable port types */
5005         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5006                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5007                 if (type == DP_DS_PORT_TYPE_VGA ||
5008                     type == DP_DS_PORT_TYPE_NON_EDID)
5009                         return connector_status_unknown;
5010         } else {
5011                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5012                         DP_DWN_STRM_PORT_TYPE_MASK;
5013                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5014                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
5015                         return connector_status_unknown;
5016         }
5017
5018         /* Anything else is out of spec, warn and ignore */
5019         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
5020         return connector_status_disconnected;
5021 }
5022
5023 static enum drm_connector_status
5024 edp_detect(struct intel_dp *intel_dp)
5025 {
5026         return connector_status_connected;
5027 }
5028
5029 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5030 {
5031         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5032         u32 bit;
5033
5034         switch (encoder->hpd_pin) {
5035         case HPD_PORT_B:
5036                 bit = SDE_PORTB_HOTPLUG;
5037                 break;
5038         case HPD_PORT_C:
5039                 bit = SDE_PORTC_HOTPLUG;
5040                 break;
5041         case HPD_PORT_D:
5042                 bit = SDE_PORTD_HOTPLUG;
5043                 break;
5044         default:
5045                 MISSING_CASE(encoder->hpd_pin);
5046                 return false;
5047         }
5048
5049         return I915_READ(SDEISR) & bit;
5050 }
5051
5052 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
5053 {
5054         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5055         u32 bit;
5056
5057         switch (encoder->hpd_pin) {
5058         case HPD_PORT_B:
5059                 bit = SDE_PORTB_HOTPLUG_CPT;
5060                 break;
5061         case HPD_PORT_C:
5062                 bit = SDE_PORTC_HOTPLUG_CPT;
5063                 break;
5064         case HPD_PORT_D:
5065                 bit = SDE_PORTD_HOTPLUG_CPT;
5066                 break;
5067         default:
5068                 MISSING_CASE(encoder->hpd_pin);
5069                 return false;
5070         }
5071
5072         return I915_READ(SDEISR) & bit;
5073 }
5074
5075 static bool spt_digital_port_connected(struct intel_encoder *encoder)
5076 {
5077         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5078         u32 bit;
5079
5080         switch (encoder->hpd_pin) {
5081         case HPD_PORT_A:
5082                 bit = SDE_PORTA_HOTPLUG_SPT;
5083                 break;
5084         case HPD_PORT_E:
5085                 bit = SDE_PORTE_HOTPLUG_SPT;
5086                 break;
5087         default:
5088                 return cpt_digital_port_connected(encoder);
5089         }
5090
5091         return I915_READ(SDEISR) & bit;
5092 }
5093
5094 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5095 {
5096         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5097         u32 bit;
5098
5099         switch (encoder->hpd_pin) {
5100         case HPD_PORT_B:
5101                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5102                 break;
5103         case HPD_PORT_C:
5104                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5105                 break;
5106         case HPD_PORT_D:
5107                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5108                 break;
5109         default:
5110                 MISSING_CASE(encoder->hpd_pin);
5111                 return false;
5112         }
5113
5114         return I915_READ(PORT_HOTPLUG_STAT) & bit;
5115 }
5116
5117 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5118 {
5119         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5120         u32 bit;
5121
5122         switch (encoder->hpd_pin) {
5123         case HPD_PORT_B:
5124                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5125                 break;
5126         case HPD_PORT_C:
5127                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5128                 break;
5129         case HPD_PORT_D:
5130                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5131                 break;
5132         default:
5133                 MISSING_CASE(encoder->hpd_pin);
5134                 return false;
5135         }
5136
5137         return I915_READ(PORT_HOTPLUG_STAT) & bit;
5138 }
5139
5140 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5141 {
5142         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5143
5144         if (encoder->hpd_pin == HPD_PORT_A)
5145                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5146         else
5147                 return ibx_digital_port_connected(encoder);
5148 }
5149
5150 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5151 {
5152         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5153
5154         if (encoder->hpd_pin == HPD_PORT_A)
5155                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5156         else
5157                 return cpt_digital_port_connected(encoder);
5158 }
5159
5160 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5161 {
5162         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5163
5164         if (encoder->hpd_pin == HPD_PORT_A)
5165                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5166         else
5167                 return cpt_digital_port_connected(encoder);
5168 }
5169
5170 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5171 {
5172         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5173
5174         if (encoder->hpd_pin == HPD_PORT_A)
5175                 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5176         else
5177                 return cpt_digital_port_connected(encoder);
5178 }
5179
5180 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5181 {
5182         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5183         u32 bit;
5184
5185         switch (encoder->hpd_pin) {
5186         case HPD_PORT_A:
5187                 bit = BXT_DE_PORT_HP_DDIA;
5188                 break;
5189         case HPD_PORT_B:
5190                 bit = BXT_DE_PORT_HP_DDIB;
5191                 break;
5192         case HPD_PORT_C:
5193                 bit = BXT_DE_PORT_HP_DDIC;
5194                 break;
5195         default:
5196                 MISSING_CASE(encoder->hpd_pin);
5197                 return false;
5198         }
5199
5200         return I915_READ(GEN8_DE_PORT_ISR) & bit;
5201 }
5202
5203 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5204                                      struct intel_digital_port *intel_dig_port)
5205 {
5206         enum port port = intel_dig_port->base.port;
5207
5208         return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5209 }
5210
5211 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5212 {
5213         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5214         struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5215         enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
5216
5217         if (intel_phy_is_combo(dev_priv, phy))
5218                 return icl_combo_port_connected(dev_priv, dig_port);
5219         else if (intel_phy_is_tc(dev_priv, phy))
5220                 return intel_tc_port_connected(dig_port);
5221         else
5222                 MISSING_CASE(encoder->hpd_pin);
5223
5224         return false;
5225 }
5226
5227 /*
5228  * intel_digital_port_connected - is the specified port connected?
5229  * @encoder: intel_encoder
5230  *
5231  * In cases where there's a connector physically connected but it can't be used
5232  * by our hardware we also return false, since the rest of the driver should
5233  * pretty much treat the port as disconnected. This is relevant for type-C
5234  * (starting on ICL) where there's ownership involved.
5235  *
5236  * Return %true if port is connected, %false otherwise.
5237  */
5238 static bool __intel_digital_port_connected(struct intel_encoder *encoder)
5239 {
5240         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5241
5242         if (HAS_GMCH(dev_priv)) {
5243                 if (IS_GM45(dev_priv))
5244                         return gm45_digital_port_connected(encoder);
5245                 else
5246                         return g4x_digital_port_connected(encoder);
5247         }
5248
5249         if (INTEL_GEN(dev_priv) >= 11)
5250                 return icl_digital_port_connected(encoder);
5251         else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5252                 return spt_digital_port_connected(encoder);
5253         else if (IS_GEN9_LP(dev_priv))
5254                 return bxt_digital_port_connected(encoder);
5255         else if (IS_GEN(dev_priv, 8))
5256                 return bdw_digital_port_connected(encoder);
5257         else if (IS_GEN(dev_priv, 7))
5258                 return ivb_digital_port_connected(encoder);
5259         else if (IS_GEN(dev_priv, 6))
5260                 return snb_digital_port_connected(encoder);
5261         else if (IS_GEN(dev_priv, 5))
5262                 return ilk_digital_port_connected(encoder);
5263
5264         MISSING_CASE(INTEL_GEN(dev_priv));
5265         return false;
5266 }
5267
5268 bool intel_digital_port_connected(struct intel_encoder *encoder)
5269 {
5270         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5271         bool is_connected = false;
5272         intel_wakeref_t wakeref;
5273
5274         with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5275                 is_connected = __intel_digital_port_connected(encoder);
5276
5277         return is_connected;
5278 }
5279
5280 static struct edid *
5281 intel_dp_get_edid(struct intel_dp *intel_dp)
5282 {
5283         struct intel_connector *intel_connector = intel_dp->attached_connector;
5284
5285         /* use cached edid if we have one */
5286         if (intel_connector->edid) {
5287                 /* invalid edid */
5288                 if (IS_ERR(intel_connector->edid))
5289                         return NULL;
5290
5291                 return drm_edid_duplicate(intel_connector->edid);
5292         } else
5293                 return drm_get_edid(&intel_connector->base,
5294                                     &intel_dp->aux.ddc);
5295 }
5296
5297 static void
5298 intel_dp_set_edid(struct intel_dp *intel_dp)
5299 {
5300         struct intel_connector *intel_connector = intel_dp->attached_connector;
5301         struct edid *edid;
5302
5303         intel_dp_unset_edid(intel_dp);
5304         edid = intel_dp_get_edid(intel_dp);
5305         intel_connector->detect_edid = edid;
5306
5307         intel_dp->has_audio = drm_detect_monitor_audio(edid);
5308         drm_dp_cec_set_edid(&intel_dp->aux, edid);
5309 }
5310
5311 static void
5312 intel_dp_unset_edid(struct intel_dp *intel_dp)
5313 {
5314         struct intel_connector *intel_connector = intel_dp->attached_connector;
5315
5316         drm_dp_cec_unset_edid(&intel_dp->aux);
5317         kfree(intel_connector->detect_edid);
5318         intel_connector->detect_edid = NULL;
5319
5320         intel_dp->has_audio = false;
5321 }
5322
5323 static int
5324 intel_dp_detect(struct drm_connector *connector,
5325                 struct drm_modeset_acquire_ctx *ctx,
5326                 bool force)
5327 {
5328         struct drm_i915_private *dev_priv = to_i915(connector->dev);
5329         struct intel_dp *intel_dp = intel_attached_dp(connector);
5330         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5331         struct intel_encoder *encoder = &dig_port->base;
5332         enum drm_connector_status status;
5333
5334         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5335                       connector->base.id, connector->name);
5336         WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5337
5338         /* Can't disconnect eDP */
5339         if (intel_dp_is_edp(intel_dp))
5340                 status = edp_detect(intel_dp);
5341         else if (intel_digital_port_connected(encoder))
5342                 status = intel_dp_detect_dpcd(intel_dp);
5343         else
5344                 status = connector_status_disconnected;
5345
5346         if (status == connector_status_disconnected) {
5347                 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5348                 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5349
5350                 if (intel_dp->is_mst) {
5351                         DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5352                                       intel_dp->is_mst,
5353                                       intel_dp->mst_mgr.mst_state);
5354                         intel_dp->is_mst = false;
5355                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5356                                                         intel_dp->is_mst);
5357                 }
5358
5359                 goto out;
5360         }
5361
5362         if (intel_dp->reset_link_params) {
5363                 /* Initial max link lane count */
5364                 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5365
5366                 /* Initial max link rate */
5367                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5368
5369                 intel_dp->reset_link_params = false;
5370         }
5371
5372         intel_dp_print_rates(intel_dp);
5373
5374         /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5375         if (INTEL_GEN(dev_priv) >= 11)
5376                 intel_dp_get_dsc_sink_cap(intel_dp);
5377
5378         drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5379                          drm_dp_is_branch(intel_dp->dpcd));
5380
5381         intel_dp_configure_mst(intel_dp);
5382
5383         if (intel_dp->is_mst) {
5384                 /*
5385                  * If we are in MST mode then this connector
5386                  * won't appear connected or have anything
5387                  * with EDID on it
5388                  */
5389                 status = connector_status_disconnected;
5390                 goto out;
5391         }
5392
5393         /*
5394          * Some external monitors do not signal loss of link synchronization
5395          * with an IRQ_HPD, so force a link status check.
5396          */
5397         if (!intel_dp_is_edp(intel_dp)) {
5398                 int ret;
5399
5400                 ret = intel_dp_retrain_link(encoder, ctx);
5401                 if (ret)
5402                         return ret;
5403         }
5404
5405         /*
5406          * Clearing NACK and defer counts to get their exact values
5407          * while reading EDID which are required by Compliance tests
5408          * 4.2.2.4 and 4.2.2.5
5409          */
5410         intel_dp->aux.i2c_nack_count = 0;
5411         intel_dp->aux.i2c_defer_count = 0;
5412
5413         intel_dp_set_edid(intel_dp);
5414         if (intel_dp_is_edp(intel_dp) ||
5415             to_intel_connector(connector)->detect_edid)
5416                 status = connector_status_connected;
5417
5418         intel_dp_check_service_irq(intel_dp);
5419
5420 out:
5421         if (status != connector_status_connected && !intel_dp->is_mst)
5422                 intel_dp_unset_edid(intel_dp);
5423
5424         return status;
5425 }
5426
5427 static void
5428 intel_dp_force(struct drm_connector *connector)
5429 {
5430         struct intel_dp *intel_dp = intel_attached_dp(connector);
5431         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5432         struct intel_encoder *intel_encoder = &dig_port->base;
5433         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5434         enum intel_display_power_domain aux_domain =
5435                 intel_aux_power_domain(dig_port);
5436         intel_wakeref_t wakeref;
5437
5438         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5439                       connector->base.id, connector->name);
5440         intel_dp_unset_edid(intel_dp);
5441
5442         if (connector->status != connector_status_connected)
5443                 return;
5444
5445         wakeref = intel_display_power_get(dev_priv, aux_domain);
5446
5447         intel_dp_set_edid(intel_dp);
5448
5449         intel_display_power_put(dev_priv, aux_domain, wakeref);
5450 }
5451
5452 static int intel_dp_get_modes(struct drm_connector *connector)
5453 {
5454         struct intel_connector *intel_connector = to_intel_connector(connector);
5455         struct edid *edid;
5456
5457         edid = intel_connector->detect_edid;
5458         if (edid) {
5459                 int ret = intel_connector_update_modes(connector, edid);
5460                 if (ret)
5461                         return ret;
5462         }
5463
5464         /* if eDP has no EDID, fall back to fixed mode */
5465         if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5466             intel_connector->panel.fixed_mode) {
5467                 struct drm_display_mode *mode;
5468
5469                 mode = drm_mode_duplicate(connector->dev,
5470                                           intel_connector->panel.fixed_mode);
5471                 if (mode) {
5472                         drm_mode_probed_add(connector, mode);
5473                         return 1;
5474                 }
5475         }
5476
5477         return 0;
5478 }
5479
5480 static int
5481 intel_dp_connector_register(struct drm_connector *connector)
5482 {
5483         struct intel_dp *intel_dp = intel_attached_dp(connector);
5484         struct drm_device *dev = connector->dev;
5485         int ret;
5486
5487         ret = intel_connector_register(connector);
5488         if (ret)
5489                 return ret;
5490
5491         i915_debugfs_connector_add(connector);
5492
5493         DRM_DEBUG_KMS("registering %s bus for %s\n",
5494                       intel_dp->aux.name, connector->kdev->kobj.name);
5495
5496         intel_dp->aux.dev = connector->kdev;
5497         ret = drm_dp_aux_register(&intel_dp->aux);
5498         if (!ret)
5499                 drm_dp_cec_register_connector(&intel_dp->aux,
5500                                               connector->name, dev->dev);
5501         return ret;
5502 }
5503
5504 static void
5505 intel_dp_connector_unregister(struct drm_connector *connector)
5506 {
5507         struct intel_dp *intel_dp = intel_attached_dp(connector);
5508
5509         drm_dp_cec_unregister_connector(&intel_dp->aux);
5510         drm_dp_aux_unregister(&intel_dp->aux);
5511         intel_connector_unregister(connector);
5512 }
5513
5514 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5515 {
5516         struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5517         struct intel_dp *intel_dp = &intel_dig_port->dp;
5518
5519         intel_dp_mst_encoder_cleanup(intel_dig_port);
5520         if (intel_dp_is_edp(intel_dp)) {
5521                 intel_wakeref_t wakeref;
5522
5523                 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5524                 /*
5525                  * vdd might still be enabled due to the delayed vdd off.
5526                  * Make sure vdd is actually turned off here.
5527                  */
5528                 with_pps_lock(intel_dp, wakeref)
5529                         edp_panel_vdd_off_sync(intel_dp);
5530
5531                 if (intel_dp->edp_notifier.notifier_call) {
5532                         unregister_reboot_notifier(&intel_dp->edp_notifier);
5533                         intel_dp->edp_notifier.notifier_call = NULL;
5534                 }
5535         }
5536
5537         intel_dp_aux_fini(intel_dp);
5538 }
5539
5540 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5541 {
5542         intel_dp_encoder_flush_work(encoder);
5543
5544         drm_encoder_cleanup(encoder);
5545         kfree(enc_to_dig_port(encoder));
5546 }
5547
5548 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5549 {
5550         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5551         intel_wakeref_t wakeref;
5552
5553         if (!intel_dp_is_edp(intel_dp))
5554                 return;
5555
5556         /*
5557          * vdd might still be enabled due to the delayed vdd off.
5558          * Make sure vdd is actually turned off here.
5559          */
5560         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5561         with_pps_lock(intel_dp, wakeref)
5562                 edp_panel_vdd_off_sync(intel_dp);
5563 }
5564
5565 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5566 {
5567         long ret;
5568
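/* C: a CP_IRQ has fired since cp_irq_count_cached was last snapshotted */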
5569 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5570         ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5571                                                msecs_to_jiffies(timeout));
5572
5573         if (!ret)
5574                 DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
5575 }
5576
5577 static
5578 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5579                                 u8 *an)
5580 {
5581         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5582         static const struct drm_dp_aux_msg msg = {
5583                 .request = DP_AUX_NATIVE_WRITE,
5584                 .address = DP_AUX_HDCP_AKSV,
5585                 .size = DRM_HDCP_KSV_LEN,
5586         };
5587         u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5588         ssize_t dpcd_ret;
5589         int ret;
5590
5591         /* Output An first, that's easy */
5592         dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5593                                      an, DRM_HDCP_AN_LEN);
5594         if (dpcd_ret != DRM_HDCP_AN_LEN) {
5595                 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5596                               dpcd_ret);
5597                 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5598         }
5599
5600         /*
5601          * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5602          * order to get it on the wire, we need to create the AUX header as if
5603          * we were writing the data, and then tickle the hardware to output the
5604          * data once the header is sent out.
5605          */
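        /*
         * For reference, the native AUX write header built here is 4 bytes
         * (a sketch, assuming the standard DP AUX header layout):
         *   txbuf[0] = (request << 4) | addr[19:16]
         *   txbuf[1] = addr[15:8]
         *   txbuf[2] = addr[7:0]
         *   txbuf[3] = size - 1
         */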
5606         intel_dp_aux_header(txbuf, &msg);
5607
5608         ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5609                                 rxbuf, sizeof(rxbuf),
5610                                 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5611         if (ret < 0) {
5612                 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5613                 return ret;
5614         } else if (ret == 0) {
5615                 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5616                 return -EIO;
5617         }
5618
5619         reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5620         if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5621                 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5622                               reply);
5623                 return -EIO;
5624         }
5625         return 0;
5626 }
5627
5628 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5629                                    u8 *bksv)
5630 {
5631         ssize_t ret;
5632         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5633                                DRM_HDCP_KSV_LEN);
5634         if (ret != DRM_HDCP_KSV_LEN) {
5635                 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5636                 return ret >= 0 ? -EIO : ret;
5637         }
5638         return 0;
5639 }
5640
5641 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5642                                       u8 *bstatus)
5643 {
5644         ssize_t ret;
5645         /*
5646          * For some reason the HDMI and DP HDCP specs call this register
5647          * definition by different names. In the HDMI spec, it's called BSTATUS,
5648          * but in DP it's called BINFO.
5649          */
5650         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5651                                bstatus, DRM_HDCP_BSTATUS_LEN);
5652         if (ret != DRM_HDCP_BSTATUS_LEN) {
5653                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5654                 return ret >= 0 ? -EIO : ret;
5655         }
5656         return 0;
5657 }
5658
5659 static
5660 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5661                              u8 *bcaps)
5662 {
5663         ssize_t ret;
5664
5665         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5666                                bcaps, 1);
5667         if (ret != 1) {
5668                 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5669                 return ret >= 0 ? -EIO : ret;
5670         }
5671
5672         return 0;
5673 }
5674
5675 static
5676 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5677                                    bool *repeater_present)
5678 {
5679         ssize_t ret;
5680         u8 bcaps;
5681
5682         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5683         if (ret)
5684                 return ret;
5685
5686         *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5687         return 0;
5688 }
5689
5690 static
5691 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5692                                 u8 *ri_prime)
5693 {
5694         ssize_t ret;
5695         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5696                                ri_prime, DRM_HDCP_RI_LEN);
5697         if (ret != DRM_HDCP_RI_LEN) {
5698                 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5699                 return ret >= 0 ? -EIO : ret;
5700         }
5701         return 0;
5702 }
5703
5704 static
5705 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5706                                  bool *ksv_ready)
5707 {
5708         ssize_t ret;
5709         u8 bstatus;
5710         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5711                                &bstatus, 1);
5712         if (ret != 1) {
5713                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5714                 return ret >= 0 ? -EIO : ret;
5715         }
5716         *ksv_ready = bstatus & DP_BSTATUS_READY;
5717         return 0;
5718 }
5719
5720 static
5721 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5722                                 int num_downstream, u8 *ksv_fifo)
5723 {
5724         ssize_t ret;
5725         int i;
5726
5727         /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5728         for (i = 0; i < num_downstream; i += 3) {
5729                 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5730                 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5731                                        DP_AUX_HDCP_KSV_FIFO,
5732                                        ksv_fifo + i * DRM_HDCP_KSV_LEN,
5733                                        len);
5734                 if (ret != len) {
5735                         DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5736                                       i, ret);
5737                         return ret >= 0 ? -EIO : ret;
5738                 }
5739         }
5740         return 0;
5741 }
5742
5743 static
5744 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5745                                     int i, u32 *part)
5746 {
5747         ssize_t ret;
5748
5749         if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5750                 return -EINVAL;
5751
5752         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5753                                DP_AUX_HDCP_V_PRIME(i), part,
5754                                DRM_HDCP_V_PRIME_PART_LEN);
5755         if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5756                 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5757                 return ret >= 0 ? -EIO : ret;
5758         }
5759         return 0;
5760 }
5761
5762 static
5763 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5764                                     bool enable)
5765 {
5766         /* Not used for single stream DisplayPort setups */
5767         return 0;
5768 }
5769
5770 static
5771 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5772 {
5773         ssize_t ret;
5774         u8 bstatus;
5775
5776         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5777                                &bstatus, 1);
5778         if (ret != 1) {
5779                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5780                 return false;
5781         }
5782
5783         return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5784 }
5785
5786 static
5787 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5788                           bool *hdcp_capable)
5789 {
5790         ssize_t ret;
5791         u8 bcaps;
5792
5793         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5794         if (ret)
5795                 return ret;
5796
5797         *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5798         return 0;
5799 }
5800
5801 struct hdcp2_dp_errata_stream_type {
5802         u8      msg_id;
5803         u8      stream_type;
5804 } __packed;
5805
5806 static struct hdcp2_dp_msg_data {
5807         u8 msg_id;
5808         u32 offset;
5809         bool msg_detectable;
5810         u32 timeout;
5811         u32 timeout2; /* Added for non_paired situation */
5812 } hdcp2_msg_data[] = {
5813         {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
5814         {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
5815                 false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
5816         {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
5817                 false, 0, 0},
5818         {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
5819                 false, 0, 0},
5820         {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
5821                 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
5822                 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
5823         {HDCP_2_2_AKE_SEND_PAIRING_INFO,
5824                 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
5825                 HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
5826         {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
5827         {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
5828                 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
5829         {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
5830                 0, 0},
5831         {HDCP_2_2_REP_SEND_RECVID_LIST,
5832                 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
5833                 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
5834         {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
5835                 0, 0},
5836         {HDCP_2_2_REP_STREAM_MANAGE,
5837                 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
5838                 0, 0},
5839         {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
5840                 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
5841 /* local define to shovel this through the write_2_2 interface */
5842 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE  50
5843         {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
5844                 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
5845                 0, 0},
5846 };
5847
5848 static inline
5849 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5850                                   u8 *rx_status)
5851 {
5852         ssize_t ret;
5853
5854         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5855                                DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5856                                HDCP_2_2_DP_RXSTATUS_LEN);
5857         if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5858                 DRM_DEBUG_KMS("Read rx_status from DP/AUX failed (%zd)\n", ret);
5859                 return ret >= 0 ? -EIO : ret;
5860         }
5861
5862         return 0;
5863 }
5864
5865 static
5866 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
5867                                   u8 msg_id, bool *msg_ready)
5868 {
5869         u8 rx_status;
5870         int ret;
5871
5872         *msg_ready = false;
5873         ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
5874         if (ret < 0)
5875                 return ret;
5876
5877         switch (msg_id) {
5878         case HDCP_2_2_AKE_SEND_HPRIME:
5879                 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
5880                         *msg_ready = true;
5881                 break;
5882         case HDCP_2_2_AKE_SEND_PAIRING_INFO:
5883                 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
5884                         *msg_ready = true;
5885                 break;
5886         case HDCP_2_2_REP_SEND_RECVID_LIST:
5887                 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
5888                         *msg_ready = true;
5889                 break;
5890         default:
5891                 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
5892                 return -EINVAL;
5893         }
5894
5895         return 0;
5896 }
5897
5898 static ssize_t
5899 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
5900                             struct hdcp2_dp_msg_data *hdcp2_msg_data)
5901 {
5902         struct intel_dp *dp = &intel_dig_port->dp;
5903         struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5904         u8 msg_id = hdcp2_msg_data->msg_id;
5905         int ret, timeout;
5906         bool msg_ready = false;
5907
5908         if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
5909                 timeout = hdcp2_msg_data->timeout2;
5910         else
5911                 timeout = hdcp2_msg_data->timeout;
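        /*
         * E.g. AKE_SEND_HPRIME uses the longer timeout2 when no pairing
         * info is stored, since the receiver must then decrypt km and
         * compute H' from scratch.
         */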
5912
5913         /*
5914          * There is no way to detect the CERT, LPRIME and STREAM_READY
5915          * availability, so wait for the timeout and then read the msg.
5916          */
5917         if (!hdcp2_msg_data->msg_detectable) {
5918                 mdelay(timeout);
5919                 ret = 0;
5920         } else {
5921                 /*
5922                  * Since we want to check msg availability at the timeout,
5923                  * ignore a timeout from the CP_IRQ wait itself.
5924                  */
5925                 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
5926                 ret = hdcp2_detect_msg_availability(intel_dig_port,
5927                                                     msg_id, &msg_ready);
5928                 if (!msg_ready)
5929                         ret = -ETIMEDOUT;
5930         }
5931
5932         if (ret)
5933                 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout (ms): %d\n",
5934                               hdcp2_msg_data->msg_id, ret, timeout);
5935
5936         return ret;
5937 }
5938
5939 static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
5940 {
5941         int i;
5942
5943         for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
5944                 if (hdcp2_msg_data[i].msg_id == msg_id)
5945                         return &hdcp2_msg_data[i];
5946
5947         return NULL;
5948 }
5949
5950 static
5951 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
5952                              void *buf, size_t size)
5953 {
5954         struct intel_dp *dp = &intel_dig_port->dp;
5955         struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5956         unsigned int offset;
5957         u8 *byte = buf;
5958         ssize_t ret, bytes_to_write, len;
5959         struct hdcp2_dp_msg_data *hdcp2_msg_data;
5960
5961         hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
5962         if (!hdcp2_msg_data)
5963                 return -EINVAL;
5964
5965         offset = hdcp2_msg_data->offset;
5966
5967         /* No msg_id in DP HDCP2.2 msgs */
5968         bytes_to_write = size - 1;
5969         byte++;
5970
5971         hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
5972
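        /*
         * A single AUX transaction carries at most DP_AUX_MAX_PAYLOAD_BYTES
         * (16) of data, so larger HDCP messages go out in 16-byte chunks;
         * a 128-byte E_kpub_km payload, for instance, takes eight writes.
         */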
5973         while (bytes_to_write) {
5974                 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
5975                                 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
5976
5977                 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
5978                                         offset, (void *)byte, len);
5979                 if (ret < 0)
5980                         return ret;
5981
5982                 bytes_to_write -= ret;
5983                 byte += ret;
5984                 offset += ret;
5985         }
5986
5987         return size;
5988 }
5989
5990 static
5991 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
5992 {
5993         u8 rx_info[HDCP_2_2_RXINFO_LEN];
5994         u32 dev_cnt;
5995         ssize_t ret;
5996
5997         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5998                                DP_HDCP_2_2_REG_RXINFO_OFFSET,
5999                                (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6000         if (ret != HDCP_2_2_RXINFO_LEN)
6001                 return ret >= 0 ? -EIO : ret;
6002
6003         dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6004                    HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6005
6006         if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6007                 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6008
6009         ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6010                 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6011                 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
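        /*
         * E.g. a repeater reporting dev_cnt == 3 yields the fixed part of
         * RepeaterAuth_Send_ReceiverID_List plus three 5-byte receiver IDs.
         */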
6012
6013         return ret;
6014 }
6015
6016 static
6017 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6018                             u8 msg_id, void *buf, size_t size)
6019 {
6020         unsigned int offset;
6021         u8 *byte = buf;
6022         ssize_t ret, bytes_to_recv, len;
6023         struct hdcp2_dp_msg_data *hdcp2_msg_data;
6024
6025         hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6026         if (!hdcp2_msg_data)
6027                 return -EINVAL;
6028         offset = hdcp2_msg_data->offset;
6029
6030         ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6031         if (ret < 0)
6032                 return ret;
6033
6034         if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6035                 ret = get_receiver_id_list_size(intel_dig_port);
6036                 if (ret < 0)
6037                         return ret;
6038
6039                 size = ret;
6040         }
6041         bytes_to_recv = size - 1;
6042
6043         /* DP adaptation msgs have no msg_id */
6044         byte++;
6045
6046         while (bytes_to_recv) {
6047                 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6048                       DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6049
6050                 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6051                                        (void *)byte, len);
6052                 if (ret < 0) {
6053                         DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6054                         return ret;
6055                 }
6056
6057                 bytes_to_recv -= ret;
6058                 byte += ret;
6059                 offset += ret;
6060         }
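        /* DP strips the msg_id on the wire; restore it for the caller. */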
6061         byte = buf;
6062         *byte = msg_id;
6063
6064         return size;
6065 }
6066
6067 static
6068 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6069                                       bool is_repeater, u8 content_type)
6070 {
6071         struct hdcp2_dp_errata_stream_type stream_type_msg;
6072
6073         if (is_repeater)
6074                 return 0;
6075
6076         /*
6077          * Errata for DP: since the stream type is used for encryption,
6078          * the receiver must be told the stream type so that it can
6079          * decrypt the content.
6080          * A repeater is sent the stream type as a part of its
6081          * authentication later on.
6082          */
6083         stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6084         stream_type_msg.stream_type = content_type;
6085
6086         return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6087                                         sizeof(stream_type_msg));
6088 }
6089
6090 static
6091 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6092 {
6093         u8 rx_status;
6094         int ret;
6095
6096         ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6097         if (ret)
6098                 return ret;
6099
6100         if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6101                 ret = HDCP_REAUTH_REQUEST;
6102         else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6103                 ret = HDCP_LINK_INTEGRITY_FAILURE;
6104         else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6105                 ret = HDCP_TOPOLOGY_CHANGE;
6106
6107         return ret;
6108 }
6109
6110 static
6111 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6112                            bool *capable)
6113 {
6114         u8 rx_caps[3];
6115         int ret;
6116
6117         *capable = false;
6118         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6119                                DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6120                                rx_caps, HDCP_2_2_RXCAPS_LEN);
6121         if (ret != HDCP_2_2_RXCAPS_LEN)
6122                 return ret >= 0 ? -EIO : ret;
6123
6124         if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6125             HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6126                 *capable = true;
6127
6128         return 0;
6129 }
6130
6131 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6132         .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6133         .read_bksv = intel_dp_hdcp_read_bksv,
6134         .read_bstatus = intel_dp_hdcp_read_bstatus,
6135         .repeater_present = intel_dp_hdcp_repeater_present,
6136         .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6137         .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6138         .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6139         .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6140         .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6141         .check_link = intel_dp_hdcp_check_link,
6142         .hdcp_capable = intel_dp_hdcp_capable,
6143         .write_2_2_msg = intel_dp_hdcp2_write_msg,
6144         .read_2_2_msg = intel_dp_hdcp2_read_msg,
6145         .config_stream_type = intel_dp_hdcp2_config_stream_type,
6146         .check_2_2_link = intel_dp_hdcp2_check_link,
6147         .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6148         .protocol = HDCP_PROTOCOL_DP,
6149 };
6150
6151 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6152 {
6153         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6154         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6155
6156         lockdep_assert_held(&dev_priv->pps_mutex);
6157
6158         if (!edp_have_panel_vdd(intel_dp))
6159                 return;
6160
6161         /*
6162          * The VDD bit needs a power domain reference, so if the bit is
6163          * already enabled when we boot or resume, grab this reference and
6164          * schedule a vdd off, so we don't hold on to the reference
6165          * indefinitely.
6166          */
6167         DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
6168         intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6169
6170         edp_panel_vdd_schedule_off(intel_dp);
6171 }
6172
6173 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6174 {
6175         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6176         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6177         enum pipe pipe;
6178
6179         if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6180                                   encoder->port, &pipe))
6181                 return pipe;
6182
6183         return INVALID_PIPE;
6184 }
6185
6186 void intel_dp_encoder_reset(struct drm_encoder *encoder)
6187 {
6188         struct drm_i915_private *dev_priv = to_i915(encoder->dev);
6189         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
6190         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
6191         intel_wakeref_t wakeref;
6192
6193         if (!HAS_DDI(dev_priv))
6194                 intel_dp->DP = I915_READ(intel_dp->output_reg);
6195
6196         if (lspcon->active)
6197                 lspcon_resume(lspcon);
6198
6199         intel_dp->reset_link_params = true;
6200
6201         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
6202             !intel_dp_is_edp(intel_dp))
6203                 return;
6204
6205         with_pps_lock(intel_dp, wakeref) {
6206                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6207                         intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6208
6209                 if (intel_dp_is_edp(intel_dp)) {
6210                         /*
6211                          * Reinit the power sequencer, in case BIOS did
6212                          * something nasty with it.
6213                          */
6214                         intel_dp_pps_init(intel_dp);
6215                         intel_edp_panel_vdd_sanitize(intel_dp);
6216                 }
6217         }
6218 }
6219
6220 static const struct drm_connector_funcs intel_dp_connector_funcs = {
6221         .force = intel_dp_force,
6222         .fill_modes = drm_helper_probe_single_connector_modes,
6223         .atomic_get_property = intel_digital_connector_atomic_get_property,
6224         .atomic_set_property = intel_digital_connector_atomic_set_property,
6225         .late_register = intel_dp_connector_register,
6226         .early_unregister = intel_dp_connector_unregister,
6227         .destroy = intel_connector_destroy,
6228         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6229         .atomic_duplicate_state = intel_digital_connector_duplicate_state,
6230 };
6231
6232 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6233         .detect_ctx = intel_dp_detect,
6234         .get_modes = intel_dp_get_modes,
6235         .mode_valid = intel_dp_mode_valid,
6236         .atomic_check = intel_digital_connector_atomic_check,
6237 };
6238
6239 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6240         .reset = intel_dp_encoder_reset,
6241         .destroy = intel_dp_encoder_destroy,
6242 };
6243
6244 enum irqreturn
6245 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6246 {
6247         struct intel_dp *intel_dp = &intel_dig_port->dp;
6248
6249         if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6250                 /*
6251                  * vdd off can generate a long pulse on eDP which
6252                  * would require vdd on to handle it, and thus we
6253                  * would end up in an endless cycle of
6254                  * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6255                  */
6256                 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
6257                               port_name(intel_dig_port->base.port));
6258                 return IRQ_HANDLED;
6259         }
6260
6261         DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
6262                       port_name(intel_dig_port->base.port),
6263                       long_hpd ? "long" : "short");
6264
6265         if (long_hpd) {
6266                 intel_dp->reset_link_params = true;
6267                 return IRQ_NONE;
6268         }
6269
6270         if (intel_dp->is_mst) {
6271                 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6272                         /*
6273                          * If we were in MST mode, and device is not
6274                          * there, get out of MST mode
6275                          */
6276                         DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6277                                       intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6278                         intel_dp->is_mst = false;
6279                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6280                                                         intel_dp->is_mst);
6281
6282                         return IRQ_NONE;
6283                 }
6284         }
6285
6286         if (!intel_dp->is_mst) {
6287                 bool handled;
6288
6289                 handled = intel_dp_short_pulse(intel_dp);
6290
6291                 if (!handled)
6292                         return IRQ_NONE;
6293         }
6294
6295         return IRQ_HANDLED;
6296 }
6297
6298 /* check the VBT to see whether the eDP is on another port */
6299 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6300 {
6301         /*
6302          * eDP is not supported on g4x, so bail out early just
6303          * for a bit of extra safety in case the VBT is bonkers.
6304          */
6305         if (INTEL_GEN(dev_priv) < 5)
6306                 return false;
6307
6308         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6309                 return true;
6310
6311         return intel_bios_is_port_edp(dev_priv, port);
6312 }
6313
6314 static void
6315 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6316 {
6317         struct drm_i915_private *dev_priv = to_i915(connector->dev);
6318         enum port port = dp_to_dig_port(intel_dp)->base.port;
6319
6320         if (!IS_G4X(dev_priv) && port != PORT_A)
6321                 intel_attach_force_audio_property(connector);
6322
6323         intel_attach_broadcast_rgb_property(connector);
6324         if (HAS_GMCH(dev_priv))
6325                 drm_connector_attach_max_bpc_property(connector, 6, 10);
6326         else if (INTEL_GEN(dev_priv) >= 5)
6327                 drm_connector_attach_max_bpc_property(connector, 6, 12);
6328
6329         if (intel_dp_is_edp(intel_dp)) {
6330                 u32 allowed_scalers;
6331
6332                 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6333                 if (!HAS_GMCH(dev_priv))
6334                         allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6335
6336                 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6337
6338                 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6339
6340         }
6341 }
6342
6343 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6344 {
6345         intel_dp->panel_power_off_time = ktime_get_boottime();
6346         intel_dp->last_power_on = jiffies;
6347         intel_dp->last_backlight_off = jiffies;
6348 }
6349
6350 static void
6351 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6352 {
6353         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6354         u32 pp_on, pp_off, pp_ctl;
6355         struct pps_registers regs;
6356
6357         intel_pps_get_registers(intel_dp, &regs);
6358
6359         pp_ctl = ironlake_get_pp_control(intel_dp);
6360
6361         /* Ensure PPS is unlocked */
6362         if (!HAS_DDI(dev_priv))
6363                 I915_WRITE(regs.pp_ctrl, pp_ctl);
6364
6365         pp_on = I915_READ(regs.pp_on);
6366         pp_off = I915_READ(regs.pp_off);
6367
6368         /* Pull timing values out of registers */
6369         seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6370         seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6371         seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6372         seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
6373
6374         if (i915_mmio_reg_valid(regs.pp_div)) {
6375                 u32 pp_div;
6376
6377                 pp_div = I915_READ(regs.pp_div);
6378
6379                 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
6380         } else {
6381                 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
6382         }
6383 }
6384
6385 static void
6386 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6387 {
6388         DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6389                       state_name,
6390                       seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6391 }
6392
6393 static void
6394 intel_pps_verify_state(struct intel_dp *intel_dp)
6395 {
6396         struct edp_power_seq hw;
6397         struct edp_power_seq *sw = &intel_dp->pps_delays;
6398
6399         intel_pps_readout_hw_state(intel_dp, &hw);
6400
6401         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6402             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6403                 DRM_ERROR("PPS state mismatch\n");
6404                 intel_pps_dump_state("sw", sw);
6405                 intel_pps_dump_state("hw", &hw);
6406         }
6407 }
6408
6409 static void
6410 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6411 {
6412         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6413         struct edp_power_seq cur, vbt, spec,
6414                 *final = &intel_dp->pps_delays;
6415
6416         lockdep_assert_held(&dev_priv->pps_mutex);
6417
6418         /* already initialized? */
6419         if (final->t11_t12 != 0)
6420                 return;
6421
6422         intel_pps_readout_hw_state(intel_dp, &cur);
6423
6424         intel_pps_dump_state("cur", &cur);
6425
6426         vbt = dev_priv->vbt.edp.pps;
6427         /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
6428          * of 500ms appears to be too short. Occasionally the panel
6429          * just fails to power back on. Enforcing a larger minimum delay
6430          * (1300ms below) seems sufficient to avoid this problem.
6431          */
6432         if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6433                 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6434                 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
6435                               vbt.t11_t12);
6436         }
6437         /* T11_T12 delay is special and actually in units of 100ms, but zero
6438          * based in the hw (so we need to add 100 ms). But the sw vbt
6439          * table multiplies it by 1000 to put it in units of 100usec,
6440          * too. */
6441         vbt.t11_t12 += 100 * 10;
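        /* i.e. add 1000 * 100us == the 100ms that the zero-based hw implies */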
6442
6443         /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6444          * our hw here, which are all in 100usec. */
6445         spec.t1_t3 = 210 * 10;
6446         spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6447         spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6448         spec.t10 = 500 * 10;
6449         /* This one is special and actually in units of 100ms, but zero
6450          * based in the hw (so we need to add 100 ms). But the sw vbt
6451          * table multiplies it by 1000 to put it in units of 100usec,
6452          * too. */
6453         spec.t11_t12 = (510 + 100) * 10;
6454
6455         intel_pps_dump_state("vbt", &vbt);
6456
6457         /* Use the max of the register settings and vbt. If both are
6458          * unset, fall back to the spec limits. */
6459 #define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
6460                                        spec.field : \
6461                                        max(cur.field, vbt.field))
6462         assign_final(t1_t3);
6463         assign_final(t8);
6464         assign_final(t9);
6465         assign_final(t10);
6466         assign_final(t11_t12);
6467 #undef assign_final
6468
6469 #define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
6470         intel_dp->panel_power_up_delay = get_delay(t1_t3);
6471         intel_dp->backlight_on_delay = get_delay(t8);
6472         intel_dp->backlight_off_delay = get_delay(t9);
6473         intel_dp->panel_power_down_delay = get_delay(t10);
6474         intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
6475 #undef get_delay
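        /*
         * The *_delay values above are in ms while the hw fields are in
         * 100us units: e.g. a t1_t3 of 2100 becomes a 210 ms
         * panel_power_up_delay.
         */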
6476
6477         DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
6478                       intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
6479                       intel_dp->panel_power_cycle_delay);
6480
6481         DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
6482                       intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
6483
6484         /*
6485          * We override the HW backlight delays to 1 because we do manual waits
6486          * on them. For T8, even BSpec recommends doing it. For T9, if we
6487          * don't do this, we'll end up waiting for the backlight off delay
6488          * twice: once when we do the manual sleep, and once when we disable
6489          * the panel and wait for the PP_STATUS bit to become zero.
6490          */
6491         final->t8 = 1;
6492         final->t9 = 1;
6493
6494         /*
6495          * HW has only a 100msec granularity for t11_t12 so round it up
6496          * accordingly.
6497          */
6498         final->t11_t12 = roundup(final->t11_t12, 100 * 10);
6499 }
6500
6501 static void
6502 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
6503                                               bool force_disable_vdd)
6504 {
6505         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6506         u32 pp_on, pp_off, port_sel = 0;
6507         int div = dev_priv->rawclk_freq / 1000;
6508         struct pps_registers regs;
6509         enum port port = dp_to_dig_port(intel_dp)->base.port;
6510         const struct edp_power_seq *seq = &intel_dp->pps_delays;
6511
6512         lockdep_assert_held(&dev_priv->pps_mutex);
6513
6514         intel_pps_get_registers(intel_dp, &regs);
6515
6516         /*
6517          * On some VLV machines the BIOS can leave the VDD
6518          * enabled even on power sequencers which aren't
6519          * hooked up to any port. This would mess up the
6520          * power domain tracking the first time we pick
6521          * one of these power sequencers for use since
6522          * edp_panel_vdd_on() would notice that the VDD was
6523          * already on and therefore wouldn't grab the power
6524          * domain reference. Disable VDD first to avoid this.
6525          * This also avoids spuriously turning the VDD on as
6526          * soon as the new power sequencer gets initialized.
6527          */
6528         if (force_disable_vdd) {
6529                 u32 pp = ironlake_get_pp_control(intel_dp);
6530
6531                 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
6532
6533                 if (pp & EDP_FORCE_VDD)
6534                         DRM_DEBUG_KMS("VDD already on, disabling first\n");
6535
6536                 pp &= ~EDP_FORCE_VDD;
6537
6538                 I915_WRITE(regs.pp_ctrl, pp);
6539         }
6540
6541         pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
6542                 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
6543         pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
6544                 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
6545
6546         /* Haswell doesn't have any port selection bits for the panel
6547          * power sequencer any more. */
6548         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6549                 port_sel = PANEL_PORT_SELECT_VLV(port);
6550         } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
6551                 switch (port) {
6552                 case PORT_A:
6553                         port_sel = PANEL_PORT_SELECT_DPA;
6554                         break;
6555                 case PORT_C:
6556                         port_sel = PANEL_PORT_SELECT_DPC;
6557                         break;
6558                 case PORT_D:
6559                         port_sel = PANEL_PORT_SELECT_DPD;
6560                         break;
6561                 default:
6562                         MISSING_CASE(port);
6563                         break;
6564                 }
6565         }
6566
6567         pp_on |= port_sel;
6568
6569         I915_WRITE(regs.pp_on, pp_on);
6570         I915_WRITE(regs.pp_off, pp_off);
6571
6572         /*
6573          * Compute the divisor for the pp clock, simply match the Bspec formula.
6574          */
6575         if (i915_mmio_reg_valid(regs.pp_div)) {
6576                 I915_WRITE(regs.pp_div,
6577                            REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
6578                            REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
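                /*
                 * E.g. with a 24 MHz raw clock div == 24, giving a
                 * reference divider field of (100 * 24) / 2 - 1 == 1199.
                 */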
6579         } else {
6580                 u32 pp_ctl;
6581
6582                 pp_ctl = I915_READ(regs.pp_ctrl);
6583                 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
6584                 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
6585                 I915_WRITE(regs.pp_ctrl, pp_ctl);
6586         }
6587
6588         DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6589                       I915_READ(regs.pp_on),
6590                       I915_READ(regs.pp_off),
6591                       i915_mmio_reg_valid(regs.pp_div) ?
6592                       I915_READ(regs.pp_div) :
6593                       (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
6594 }
6595
6596 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6597 {
6598         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6599
6600         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6601                 vlv_initial_power_sequencer_setup(intel_dp);
6602         } else {
6603                 intel_dp_init_panel_power_sequencer(intel_dp);
6604                 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6605         }
6606 }
6607
6608 /**
6609  * intel_dp_set_drrs_state - program registers for RR switch to take effect
6610  * @dev_priv: i915 device
6611  * @crtc_state: a pointer to the active intel_crtc_state
6612  * @refresh_rate: RR to be programmed
6613  *
6614  * This function gets called when refresh rate (RR) has to be changed from
6615  * one frequency to another. Switches can be between high and low RR
6616  * supported by the panel or to any other RR based on media playback (in
6617  * this case, RR value needs to be passed from user space).
6618  *
6619  * The caller of this function needs to take a lock on dev_priv->drrs.
6620  */
6621 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6622                                     const struct intel_crtc_state *crtc_state,
6623                                     int refresh_rate)
6624 {
6625         struct intel_dp *intel_dp = dev_priv->drrs.dp;
6626         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6627         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6628
6629         if (refresh_rate <= 0) {
6630                 DRM_DEBUG_KMS("Refresh rate should be positive.\n");
6631                 return;
6632         }
6633
6634         if (intel_dp == NULL) {
6635                 DRM_DEBUG_KMS("DRRS not supported.\n");
6636                 return;
6637         }
6638
6639         if (!intel_crtc) {
6640                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6641                 return;
6642         }
6643
6644         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6645                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6646                 return;
6647         }
6648
6649         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6650                         refresh_rate)
6651                 index = DRRS_LOW_RR;
6652
6653         if (index == dev_priv->drrs.refresh_rate_type) {
6654                 DRM_DEBUG_KMS(
6655                         "DRRS requested for previously set RR...ignoring\n");
6656                 return;
6657         }
6658
6659         if (!crtc_state->base.active) {
6660                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not active\n");
6661                 return;
6662         }
6663
6664         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6665                 switch (index) {
6666                 case DRRS_HIGH_RR:
6667                         intel_dp_set_m_n(crtc_state, M1_N1);
6668                         break;
6669                 case DRRS_LOW_RR:
6670                         intel_dp_set_m_n(crtc_state, M2_N2);
6671                         break;
6672                 case DRRS_MAX_RR:
6673                 default:
6674                         DRM_ERROR("Unsupported refresh rate type\n");
6675                 }
6676         } else if (INTEL_GEN(dev_priv) > 6) {
6677                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6678                 u32 val;
6679
6680                 val = I915_READ(reg);
6681                 if (index > DRRS_HIGH_RR) {
6682                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6683                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6684                         else
6685                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6686                 } else {
6687                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6688                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6689                         else
6690                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6691                 }
6692                 I915_WRITE(reg, val);
6693         }
6694
6695         dev_priv->drrs.refresh_rate_type = index;
6696
6697         DRM_DEBUG_KMS("eDP refresh rate set to %d Hz\n", refresh_rate);
6698 }
6699
6700 /**
6701  * intel_edp_drrs_enable - init drrs struct if supported
6702  * @intel_dp: DP struct
6703  * @crtc_state: A pointer to the active crtc state.
6704  *
6705  * Initializes frontbuffer_bits and drrs.dp
6706  */
6707 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6708                            const struct intel_crtc_state *crtc_state)
6709 {
6710         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6711
6712         if (!crtc_state->has_drrs) {
6713                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
6714                 return;
6715         }
6716
6717         if (dev_priv->psr.enabled) {
6718                 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
6719                 return;
6720         }
6721
6722         mutex_lock(&dev_priv->drrs.mutex);
6723         if (dev_priv->drrs.dp) {
6724                 DRM_DEBUG_KMS("DRRS already enabled\n");
6725                 goto unlock;
6726         }
6727
6728         dev_priv->drrs.busy_frontbuffer_bits = 0;
6729
6730         dev_priv->drrs.dp = intel_dp;
6731
6732 unlock:
6733         mutex_unlock(&dev_priv->drrs.mutex);
6734 }
6735
6736 /**
6737  * intel_edp_drrs_disable - Disable DRRS
6738  * @intel_dp: DP struct
6739  * @old_crtc_state: Pointer to old crtc_state.
6740  *
6741  */
6742 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6743                             const struct intel_crtc_state *old_crtc_state)
6744 {
6745         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6746
6747         if (!old_crtc_state->has_drrs)
6748                 return;
6749
6750         mutex_lock(&dev_priv->drrs.mutex);
6751         if (!dev_priv->drrs.dp) {
6752                 mutex_unlock(&dev_priv->drrs.mutex);
6753                 return;
6754         }
6755
6756         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6757                 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
6758                         intel_dp->attached_connector->panel.fixed_mode->vrefresh);
6759
6760         dev_priv->drrs.dp = NULL;
6761         mutex_unlock(&dev_priv->drrs.mutex);
6762
6763         cancel_delayed_work_sync(&dev_priv->drrs.work);
6764 }
6765
6766 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6767 {
6768         struct drm_i915_private *dev_priv =
6769                 container_of(work, typeof(*dev_priv), drrs.work.work);
6770         struct intel_dp *intel_dp;
6771
6772         mutex_lock(&dev_priv->drrs.mutex);
6773
6774         intel_dp = dev_priv->drrs.dp;
6775
6776         if (!intel_dp)
6777                 goto unlock;
6778
6779         /*
6780          * The delayed work can race with an invalidate, hence we need to
6781          * recheck.
6782          */
6783
6784         if (dev_priv->drrs.busy_frontbuffer_bits)
6785                 goto unlock;
6786
6787         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
6788                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6789
6790                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6791                         intel_dp->attached_connector->panel.downclock_mode->vrefresh);
6792         }
6793
6794 unlock:
6795         mutex_unlock(&dev_priv->drrs.mutex);
6796 }
6797
6798 /**
6799  * intel_edp_drrs_invalidate - Disable Idleness DRRS
6800  * @dev_priv: i915 device
6801  * @frontbuffer_bits: frontbuffer plane tracking bits
6802  *
6803  * This function gets called every time rendering on the given planes starts.
6804  * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
6805  *
6806  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6807  */
6808 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6809                                unsigned int frontbuffer_bits)
6810 {
6811         struct drm_crtc *crtc;
6812         enum pipe pipe;
6813
6814         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6815                 return;
6816
6817         cancel_delayed_work(&dev_priv->drrs.work);
6818
6819         mutex_lock(&dev_priv->drrs.mutex);
6820         if (!dev_priv->drrs.dp) {
6821                 mutex_unlock(&dev_priv->drrs.mutex);
6822                 return;
6823         }
6824
6825         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6826         pipe = to_intel_crtc(crtc)->pipe;
6827
6828         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6829         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6830
6831         /* invalidate means busy screen hence upclock */
6832         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6833                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6834                         dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6835
6836         mutex_unlock(&dev_priv->drrs.mutex);
6837 }
6838
6839 /**
6840  * intel_edp_drrs_flush - Restart Idleness DRRS
6841  * @dev_priv: i915 device
6842  * @frontbuffer_bits: frontbuffer plane tracking bits
6843  *
6844  * This function gets called every time rendering on the given planes has
6845  * completed or a flip on a crtc is completed, so DRRS should be upclocked
6846  * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted if no
6847  * other planes are dirty.
6848  *
6849  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6850  */
6851 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6852                           unsigned int frontbuffer_bits)
6853 {
6854         struct drm_crtc *crtc;
6855         enum pipe pipe;
6856
6857         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6858                 return;
6859
6860         cancel_delayed_work(&dev_priv->drrs.work);
6861
6862         mutex_lock(&dev_priv->drrs.mutex);
6863         if (!dev_priv->drrs.dp) {
6864                 mutex_unlock(&dev_priv->drrs.mutex);
6865                 return;
6866         }
6867
6868         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6869         pipe = to_intel_crtc(crtc)->pipe;
6870
6871         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6872         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6873
6874         /* flush means busy screen hence upclock */
6875         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6876                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6877                                 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6878
6879         /*
6880          * flush also means no more activity hence schedule downclock, if all
6881          * other fbs are quiescent too
6882          */
6883         if (!dev_priv->drrs.busy_frontbuffer_bits)
6884                 schedule_delayed_work(&dev_priv->drrs.work,
6885                                 msecs_to_jiffies(1000));
6886         mutex_unlock(&dev_priv->drrs.mutex);
6887 }
6888
6889 /**
6890  * DOC: Display Refresh Rate Switching (DRRS)
6891  *
6892  * Display Refresh Rate Switching (DRRS) is a power conservation feature
6893  * which enables switching between low and high refresh rates,
6894  * dynamically, based on the usage scenario. This feature is applicable
6895  * for internal panels.
6896  *
6897  * Indication that the panel supports DRRS is given by the panel EDID, which
6898  * would list multiple refresh rates for one resolution.
6899  *
6900  * DRRS is of 2 types - static and seamless.
6901  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6902  * (may appear as a blink on screen) and is used in dock-undock scenarios.
6903  * Seamless DRRS involves changing RR without any visual effect to the user
6904  * and can be used during normal system usage. This is done by programming
6905  * certain registers.
6906  *
6907  * Support for static/seamless DRRS may be indicated in the VBT based on
6908  * inputs from the panel spec.
6909  *
6910  * DRRS saves power by switching to low RR based on usage scenarios.
6911  *
6912  * The implementation is based on frontbuffer tracking implementation.  When
6913  * there is a disturbance on the screen triggered by user activity or a periodic
6914  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
6915  * no movement on screen, after a timeout of 1 second, a switch to low RR is
6916  * made.
6917  *
6918  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6919  * and intel_edp_drrs_flush() are called.
6920  *
6921  * DRRS can be further extended to support other internal panels and also
6922  * the scenario of video playback wherein RR is set based on the rate
6923  * requested by userspace.
6924  */
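/*
 * As an illustration (a sketch, not the full integration), a frontbuffer
 * write path brackets rendering with the two hooks above:
 *
 *      intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *      ... draw to the frontbuffer ...
 *      intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * after which intel_edp_drrs_downclock_work() drops back to the low
 * refresh rate once the screen has been idle for about a second.
 */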
6925
6926 /**
6927  * intel_dp_drrs_init - Init basic DRRS work and mutex.
6928  * @connector: eDP connector
6929  * @fixed_mode: preferred mode of panel
6930  *
6931  * This function is called only once at driver load to initialize basic
6932  * DRRS stuff.
6933  *
6934  * Returns:
6935  * Downclock mode if panel supports it, else return NULL.
6936  * DRRS support is determined by the presence of downclock mode (apart
6937  * from VBT setting).
6938  */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
                   struct drm_display_mode *fixed_mode)
{
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct drm_display_mode *downclock_mode = NULL;

        INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
        mutex_init(&dev_priv->drrs.mutex);

        if (INTEL_GEN(dev_priv) <= 6) {
                DRM_DEBUG_KMS("DRRS supported only on Gen7 and above\n");
                return NULL;
        }

        if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
                return NULL;
        }

        downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
        if (!downclock_mode) {
                DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
                return NULL;
        }

        dev_priv->drrs.type = dev_priv->vbt.drrs_type;

        dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
        DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
        return downclock_mode;
}
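
/*
 * Sketch of how the return value is consumed (this mirrors the eDP
 * connector init below): the downclock mode is simply handed to
 * intel_panel_init() alongside the fixed mode, so a NULL return
 * degrades gracefully to no-DRRS:
 *
 *      downclock_mode = intel_dp_drrs_init(connector, fixed_mode);
 *      intel_panel_init(&connector->panel, fixed_mode, downclock_mode);
 */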

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector = &intel_connector->base;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        enum pipe pipe = INVALID_PIPE;
        intel_wakeref_t wakeref;
        struct edid *edid;

        if (!intel_dp_is_edp(intel_dp))
                return true;

        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

        /*
         * On IBX/CPT we may get here with LVDS already registered. Since the
         * driver uses the only internal power sequencer available for both
         * eDP and LVDS, bail out early in this case to prevent interfering
         * with an already powered-on LVDS power sequencer.
         */
        if (intel_get_lvds_encoder(dev_priv)) {
                WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
                DRM_INFO("LVDS was detected, not registering eDP\n");

                return false;
        }

        with_pps_lock(intel_dp, wakeref) {
                intel_dp_init_panel_power_timestamps(intel_dp);
                intel_dp_pps_init(intel_dp);
                intel_edp_panel_vdd_sanitize(intel_dp);
        }

        /* Cache DPCD and EDID for eDP. */
        has_dpcd = intel_edp_init_dpcd(intel_dp);

        if (!has_dpcd) {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                goto out_vdd_off;
        }

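        /*
         * Note the sentinel encoding cached below: a valid pointer means
         * usable EDID, ERR_PTR(-EINVAL) means an EDID was read but yielded
         * no modes, and ERR_PTR(-ENOENT) means no EDID at all, so readers
         * of intel_connector->edid are expected to check IS_ERR() first.
         */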
        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_connector_update_edid_property(connector,
                                                           edid);
                } else {
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        intel_connector->edid = edid;

        fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
        if (fixed_mode)
                downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

        /* fall back to VBT if available for eDP */
        if (!fixed_mode)
                fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                pipe = vlv_active_pipe(intel_dp);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight.power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        if (fixed_mode)
                drm_connector_init_panel_orientation_property(
                        connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

        return true;

out_vdd_off:
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        with_pps_lock(intel_dp, wakeref)
                edp_panel_vdd_off_sync(intel_dp);

        return false;
}

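/*
 * Per the DRM link-status contract: once the property is forced to BAD
 * and the hotplug uevent below is sent, userspace is expected to
 * re-probe the connector and perform a full modeset, which retrains
 * the link with whatever fallback parameters the link training code
 * has picked.
 */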
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
        struct intel_connector *intel_connector;
        struct drm_connector *connector;

        intel_connector = container_of(work, typeof(*intel_connector),
                                       modeset_retry_work);
        connector = &intel_connector->base;
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
                      connector->name);

        /* Grab the locks before changing connector property */
        mutex_lock(&connector->dev->mode_config.mutex);
        /*
         * Set connector link status to BAD and send a uevent to notify
         * userspace to do a modeset.
         */
        drm_connector_set_link_status_property(connector,
                                               DRM_MODE_LINK_STATUS_BAD);
        mutex_unlock(&connector->dev->mode_config.mutex);
        /* Send Hotplug uevent so userspace can reprobe */
        drm_kms_helper_hotplug_event(connector->dev);
}

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_encoder->port;
        enum phy phy = intel_port_to_phy(dev_priv, port);
        int type;

        /* Initialize the work for modeset in case of link train failure */
        INIT_WORK(&intel_connector->modeset_retry_work,
                  intel_dp_modeset_retry_work_fn);

        if (WARN(intel_dig_port->max_lanes < 1,
                 "Not enough lanes (%d) for DP on port %c\n",
                 intel_dig_port->max_lanes, port_name(port)))
                return false;

        intel_dp_set_source_rates(intel_dp);

        intel_dp->reset_link_params = true;
        intel_dp->pps_pipe = INVALID_PIPE;
        intel_dp->active_pipe = INVALID_PIPE;

        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        if (intel_dp_is_port_edp(dev_priv, port)) {
                /*
                 * Currently we don't support eDP on TypeC ports, although in
                 * theory it could work on TypeC legacy ports.
                 */
                WARN_ON(intel_phy_is_tc(dev_priv, phy));
                type = DRM_MODE_CONNECTOR_eDP;
        } else {
                type = DRM_MODE_CONNECTOR_DisplayPort;
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                intel_dp->active_pipe = vlv_active_pipe(intel_dp);

        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
         * for DP the encoder type can be set by the caller to
         * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
         */
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;

        /* eDP only on port B and/or C on vlv/chv */
        if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
                    intel_dp_is_edp(intel_dp) &&
                    port != PORT_B && port != PORT_C))
                return false;

        DRM_DEBUG_KMS("Adding %s connector on port %c\n",
                      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                      port_name(port));

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        if (!HAS_GMCH(dev_priv))
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        if (INTEL_GEN(dev_priv) >= 11)
                connector->ycbcr_420_allowed = true;

        intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

        intel_dp_aux_init(intel_dp);

        intel_connector_attach_encoder(intel_connector, intel_encoder);

        if (HAS_DDI(dev_priv))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;

        /* init MST on ports that can support it */
        if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
            (port == PORT_B || port == PORT_C ||
             port == PORT_D || port == PORT_F))
                intel_dp_mst_encoder_init(intel_dig_port,
                                          intel_connector->base.base.id);

        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                intel_dp_aux_fini(intel_dp);
                intel_dp_mst_encoder_cleanup(intel_dig_port);
                goto fail;
        }

        intel_dp_add_properties(intel_dp, connector);

        if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
                int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
                if (ret)
                        DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
        }

        /*
         * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * to 0xd. Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G45(dev_priv)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }

        return true;

fail:
        drm_connector_cleanup(connector);

        return false;
}

bool intel_dp_init(struct drm_i915_private *dev_priv,
                   i915_reg_t output_reg,
                   enum port port)
{
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;

        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
                return false;

        intel_connector = intel_connector_alloc();
        if (!intel_connector)
                goto err_connector_alloc;

        intel_encoder = &intel_dig_port->base;
        encoder = &intel_encoder->base;

        if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
                             &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
                             "DP %c", port_name(port)))
                goto err_encoder_init;

        intel_encoder->hotplug = intel_dp_hotplug;
        intel_encoder->compute_config = intel_dp_compute_config;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
        intel_encoder->update_pipe = intel_panel_update_backlight;
        intel_encoder->suspend = intel_dp_encoder_suspend;
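        /*
         * Platform-specific enable/disable hooks: CHV and VLV drive DP
         * through their own DPIO PHYs and therefore need dedicated
         * pre/post PLL steps; all other platforms here take the g4x
         * paths.
         */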
        if (IS_CHERRYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
                intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
                intel_encoder->pre_enable = vlv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                intel_encoder->disable = g4x_disable_dp;
                intel_encoder->post_disable = g4x_post_disable_dp;
        }

        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;

        intel_encoder->type = INTEL_OUTPUT_DP;
        intel_encoder->power_domain = intel_port_to_power_domain(port);
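        /*
         * crtc_mask lists the pipes that may feed this encoder: on CHV
         * port D is reachable only from pipe C, while ports B and C can
         * use pipes A and B.
         */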
        if (IS_CHERRYVIEW(dev_priv)) {
                if (port == PORT_D)
                        intel_encoder->crtc_mask = 1 << 2;
                else
                        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
        } else {
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        }
        intel_encoder->cloneable = 0;
        intel_encoder->port = port;

        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;

        if (port != PORT_A)
                intel_infoframe_init(intel_dig_port);

        intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;

        return true;

err_init_connector:
        drm_encoder_cleanup(encoder);
err_encoder_init:
        kfree(intel_connector);
err_connector_alloc:
        kfree(intel_dig_port);
        return false;
}
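
/*
 * Illustrative call, hedged: the real call sites live in the display
 * core setup code, but a g4x-style platform registers a DP port along
 * the lines of
 *
 *      intel_dp_init(dev_priv, DP_B, PORT_B);
 *
 * On failure everything allocated here has already been unwound through
 * the error labels, so the caller has nothing to clean up.
 */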

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_DDI)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);

                if (!intel_dp->can_mst)
                        continue;

                if (intel_dp->is_mst)
                        drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
        }
}

void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;
                int ret;

                if (encoder->type != INTEL_OUTPUT_DDI)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);

                if (!intel_dp->can_mst)
                        continue;

                ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
                if (ret) {
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        false);
                }
        }
}
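
/*
 * Note the asymmetry with intel_dp_mst_suspend() above: if the topology
 * manager cannot be resumed (e.g. the MST sink went away while
 * suspended), the port falls back to SST by tearing the MST session
 * down instead of leaving a dangling topology behind.
 */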