/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 */
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/byteorder.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
#define DP_DSC_MIN_SUPPORTED_BPC		8
#define DP_DSC_MAX_SUPPORTED_BPC		10

/* DP DSC throughput values used for slice count calculations, in KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		976
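/*
 * Informational note: 976 represents 0.976 (i.e. (100 - 2.4)/100) stored as
 * an integer scaled by 1000; code multiplying by this factor is expected to
 * divide the result by 1000 accordingly.
 */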
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 panels that have more link rates.
 * Below we only provide the fixed rates and exclude the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
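/*
 * Sanity check of the CHV fixed-point encoding above (informational):
 * (m2_int << 22) | m2_fraction = (32 << 22) | 1677722
 *   = 0x8000000 | 0x19999a = 0x819999a for the 162000 entry, and
 * (27 << 22) | 0 = 0x6c00000 for the 270000 entry.
 */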
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With a single pipe configuration, the HW is capable of supporting a maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
135 static void intel_dp_link_down(struct intel_encoder *encoder,
136 const struct intel_crtc_state *old_crtc_state);
137 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
138 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
139 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
140 const struct intel_crtc_state *crtc_state);
141 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
143 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
145 /* update sink rates from dpcd */
146 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
148 static const int dp_rates[] = {
149 162000, 270000, 540000, 810000
153 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
155 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
156 if (dp_rates[i] > max_rate)
158 intel_dp->sink_rates[i] = dp_rates[i];
161 intel_dp->num_sink_rates = i;
164 /* Get length of rates array potentially limited by max_rate. */
165 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
169 /* Limit results by potentially reduced max rate */
170 for (i = 0; i < len; i++) {
171 if (rates[len - i - 1] <= max_rate)
178 /* Get length of common rates array potentially limited by max_rate. */
179 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
182 return intel_dp_rate_limit_len(intel_dp->common_rates,
183 intel_dp->num_common_rates, max_rate);
186 /* Theoretical max between source and sink */
187 static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
189 return intel_dp->common_rates[intel_dp->num_common_rates - 1];
192 static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
194 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
195 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
196 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
199 if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
202 lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
203 DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
204 DP_LANE_ASSIGNMENT_SHIFT(tc_port);
208 MISSING_CASE(lane_info);
222 /* Theoretical max between source and sink */
223 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
225 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
226 int source_max = intel_dig_port->max_lanes;
227 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
228 int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
230 return min3(source_max, sink_max, fia_max);
233 int intel_dp_max_lane_count(struct intel_dp *intel_dp)
235 return intel_dp->max_link_lane_count;
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit-to-byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
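/*
 * Example for intel_dp_link_required() above (informational): a 1920x1080@60
 * mode with a 148500 kHz pixel clock at 24 bpp needs
 * DIV_ROUND_UP(148500 * 24, 8) = 445500 kB/s of link bandwidth.
 */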
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */
	return max_link_clock * max_lanes;
}
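/*
 * Example for intel_dp_max_data_rate() above (informational): an HBR2 link
 * (LS_Clk = 540000 kHz) on 4 lanes provides 540000 * 4 = 2160000 kB/s,
 * comfortably above the 445500 kB/s needed in the 1080p example above.
 */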
258 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
260 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
261 struct intel_encoder *encoder = &intel_dig_port->base;
262 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
263 int max_dotclk = dev_priv->max_dotclk_freq;
266 int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
268 if (type != DP_DS_PORT_TYPE_VGA)
271 ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
272 intel_dp->downstream_ports);
274 if (ds_max_dotclk != 0)
275 max_dotclk = min(max_dotclk, ds_max_dotclk);
280 static int cnl_max_source_rate(struct intel_dp *intel_dp)
282 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
283 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
284 enum port port = dig_port->base.port;
286 u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
288 /* Low voltage SKUs are limited to max of 5.4G */
289 if (voltage == VOLTAGE_INFO_0_85V)
292 /* For this SKU 8.1G is supported in all ports */
293 if (IS_CNL_WITH_PORT_F(dev_priv))
296 /* For other SKUs, max rate on ports A and D is 5.4G */
297 if (port == PORT_A || port == PORT_D)
303 static int icl_max_source_rate(struct intel_dp *intel_dp)
305 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
306 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
307 enum port port = dig_port->base.port;
309 if (intel_port_is_combophy(dev_priv, port) &&
310 !intel_dp_is_edp(intel_dp))
317 intel_dp_set_source_rates(struct intel_dp *intel_dp)
319 /* The values must be in increasing order */
320 static const int cnl_rates[] = {
321 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
323 static const int bxt_rates[] = {
324 162000, 216000, 243000, 270000, 324000, 432000, 540000
326 static const int skl_rates[] = {
327 162000, 216000, 270000, 324000, 432000, 540000
329 static const int hsw_rates[] = {
330 162000, 270000, 540000
332 static const int g4x_rates[] = {
335 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
336 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
337 const struct ddi_vbt_port_info *info =
338 &dev_priv->vbt.ddi_port_info[dig_port->base.port];
339 const int *source_rates;
340 int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
342 /* This should only be done once */
343 WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
345 if (INTEL_GEN(dev_priv) >= 10) {
346 source_rates = cnl_rates;
347 size = ARRAY_SIZE(cnl_rates);
348 if (IS_GEN(dev_priv, 10))
349 max_rate = cnl_max_source_rate(intel_dp);
351 max_rate = icl_max_source_rate(intel_dp);
352 } else if (IS_GEN9_LP(dev_priv)) {
353 source_rates = bxt_rates;
354 size = ARRAY_SIZE(bxt_rates);
355 } else if (IS_GEN9_BC(dev_priv)) {
356 source_rates = skl_rates;
357 size = ARRAY_SIZE(skl_rates);
358 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
359 IS_BROADWELL(dev_priv)) {
360 source_rates = hsw_rates;
361 size = ARRAY_SIZE(hsw_rates);
363 source_rates = g4x_rates;
364 size = ARRAY_SIZE(g4x_rates);
367 if (max_rate && vbt_max_rate)
368 max_rate = min(max_rate, vbt_max_rate);
369 else if (vbt_max_rate)
370 max_rate = vbt_max_rate;
373 size = intel_dp_rate_limit_len(source_rates, size, max_rate);
375 intel_dp->source_rates = source_rates;
376 intel_dp->num_source_rates = size;
379 static int intersect_rates(const int *source_rates, int source_len,
380 const int *sink_rates, int sink_len,
383 int i = 0, j = 0, k = 0;
385 while (i < source_len && j < sink_len) {
386 if (source_rates[i] == sink_rates[j]) {
387 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
389 common_rates[k] = source_rates[i];
393 } else if (source_rates[i] < sink_rates[j]) {
402 /* return index of rate in rates array, or -1 if not found */
403 static int intel_dp_rate_index(const int *rates, int len, int rate)
407 for (i = 0; i < len; i++)
408 if (rate == rates[i])
414 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
416 WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
418 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
419 intel_dp->num_source_rates,
420 intel_dp->sink_rates,
421 intel_dp->num_sink_rates,
422 intel_dp->common_rates);
424 /* Paranoia, there should always be something in common. */
425 if (WARN_ON(intel_dp->num_common_rates == 0)) {
426 intel_dp->common_rates[0] = 162000;
427 intel_dp->num_common_rates = 1;
431 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
* FIXME: we need to synchronize the current link parameters with
* hardware readout. Currently fast link training doesn't work on
* boot-up.
439 if (link_rate == 0 ||
440 link_rate > intel_dp->max_link_rate)
443 if (lane_count == 0 ||
444 lane_count > intel_dp_max_lane_count(intel_dp))
450 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
454 const struct drm_display_mode *fixed_mode =
455 intel_dp->attached_connector->panel.fixed_mode;
456 int mode_rate, max_rate;
458 mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
459 max_rate = intel_dp_max_data_rate(link_rate, lane_count);
460 if (mode_rate > max_rate)
466 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
467 int link_rate, u8 lane_count)
471 index = intel_dp_rate_index(intel_dp->common_rates,
472 intel_dp->num_common_rates,
475 if (intel_dp_is_edp(intel_dp) &&
476 !intel_dp_can_link_train_fallback_for_edp(intel_dp,
477 intel_dp->common_rates[index - 1],
479 DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
482 intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
483 intel_dp->max_link_lane_count = lane_count;
484 } else if (lane_count > 1) {
485 if (intel_dp_is_edp(intel_dp) &&
486 !intel_dp_can_link_train_fallback_for_edp(intel_dp,
487 intel_dp_max_common_rate(intel_dp),
489 DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
492 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
493 intel_dp->max_link_lane_count = lane_count >> 1;
495 DRM_ERROR("Link Training Unsuccessful\n");
502 static enum drm_mode_status
503 intel_dp_mode_valid(struct drm_connector *connector,
504 struct drm_display_mode *mode)
506 struct intel_dp *intel_dp = intel_attached_dp(connector);
507 struct intel_connector *intel_connector = to_intel_connector(connector);
508 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
509 struct drm_i915_private *dev_priv = to_i915(connector->dev);
510 int target_clock = mode->clock;
511 int max_rate, mode_rate, max_lanes, max_link_clock;
513 u16 dsc_max_output_bpp = 0;
514 u8 dsc_slice_count = 0;
516 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
517 return MODE_NO_DBLESCAN;
519 max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
521 if (intel_dp_is_edp(intel_dp) && fixed_mode) {
522 if (mode->hdisplay > fixed_mode->hdisplay)
525 if (mode->vdisplay > fixed_mode->vdisplay)
528 target_clock = fixed_mode->clock;
531 max_link_clock = intel_dp_max_link_rate(intel_dp);
532 max_lanes = intel_dp_max_lane_count(intel_dp);
534 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
535 mode_rate = intel_dp_link_required(target_clock, 18);
538 * Output bpp is stored in 6.4 format so right shift by 4 to get the
539 * integer value since we support only integer values of bpp.
541 if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
542 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
543 if (intel_dp_is_edp(intel_dp)) {
545 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
547 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
549 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
551 intel_dp_dsc_get_output_bpp(max_link_clock,
554 mode->hdisplay) >> 4;
556 intel_dp_dsc_get_slice_count(intel_dp,
562 if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
563 target_clock > max_dotclk)
564 return MODE_CLOCK_HIGH;
566 if (mode->clock < 10000)
567 return MODE_CLOCK_LOW;
569 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
570 return MODE_H_ILLEGAL;
575 u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
582 for (i = 0; i < src_bytes; i++)
583 v |= ((u32)src[i]) << ((3 - i) * 8);
587 static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
592 for (i = 0; i < dst_bytes; i++)
593 dst[i] = src >> ((3-i) * 8);
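/*
 * Example for the pack/unpack helpers above (informational): packing
 * src = {0x12, 0x34, 0x56} with src_bytes = 3 yields 0x12345600 (MSB first),
 * and intel_dp_unpack_aux(0x12345600, dst, 3) restores the same three bytes.
 * This is the ordering used when filling the AUX CH data registers.
 */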
597 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
599 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
600 bool force_disable_vdd);
602 intel_dp_pps_init(struct intel_dp *intel_dp);
604 static intel_wakeref_t
605 pps_lock(struct intel_dp *intel_dp)
607 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
608 intel_wakeref_t wakeref;
* See intel_power_sequencer_reset() for why we need
* a power domain reference here.
614 wakeref = intel_display_power_get(dev_priv,
615 intel_aux_power_domain(dp_to_dig_port(intel_dp)));
617 mutex_lock(&dev_priv->pps_mutex);
622 static intel_wakeref_t
623 pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
625 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
627 mutex_unlock(&dev_priv->pps_mutex);
628 intel_display_power_put(dev_priv,
629 intel_aux_power_domain(dp_to_dig_port(intel_dp)),
634 #define with_pps_lock(dp, wf) \
635 for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
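/*
 * Illustrative usage of with_pps_lock() (sketch only):
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... touch PPS registers / VDD state ...
 *	}
 *
 * The loop body runs exactly once: pps_lock() returns a non-zero wakeref
 * while taking pps_mutex and a display power reference, and pps_unlock()
 * returns 0 after releasing both, which terminates the loop.
 */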
638 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
640 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
641 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
642 enum pipe pipe = intel_dp->pps_pipe;
643 bool pll_enabled, release_cl_override = false;
644 enum dpio_phy phy = DPIO_PHY(pipe);
645 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
648 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
649 "skipping pipe %c power sequencer kick due to port %c being active\n",
650 pipe_name(pipe), port_name(intel_dig_port->base.port)))
653 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
654 pipe_name(pipe), port_name(intel_dig_port->base.port));
656 /* Preserve the BIOS-computed detected bit. This is
657 * supposed to be read-only.
659 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
660 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
661 DP |= DP_PORT_WIDTH(1);
662 DP |= DP_LINK_TRAIN_PAT_1;
664 if (IS_CHERRYVIEW(dev_priv))
665 DP |= DP_PIPE_SEL_CHV(pipe);
667 DP |= DP_PIPE_SEL(pipe);
669 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
* The DPLL for the pipe must be enabled for this to work.
* So temporarily enable it if it's not already enabled.
676 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
677 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
679 if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
680 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
681 DRM_ERROR("Failed to force on pll for pipe %c!\n",
688 * Similar magic as in intel_dp_enable_port().
689 * We _must_ do this port enable + disable trick
690 * to make this power sequencer lock onto the port.
691 * Otherwise even VDD force bit won't work.
693 I915_WRITE(intel_dp->output_reg, DP);
694 POSTING_READ(intel_dp->output_reg);
696 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
697 POSTING_READ(intel_dp->output_reg);
699 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
700 POSTING_READ(intel_dp->output_reg);
703 vlv_force_pll_off(dev_priv, pipe);
705 if (release_cl_override)
706 chv_phy_powergate_ch(dev_priv, phy, ch, false);
710 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
712 struct intel_encoder *encoder;
713 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
* We don't have a power sequencer currently.
* Pick one that's not used by another port.
719 for_each_intel_dp(&dev_priv->drm, encoder) {
720 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
722 if (encoder->type == INTEL_OUTPUT_EDP) {
723 WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
724 intel_dp->active_pipe != intel_dp->pps_pipe);
726 if (intel_dp->pps_pipe != INVALID_PIPE)
727 pipes &= ~(1 << intel_dp->pps_pipe);
729 WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
731 if (intel_dp->active_pipe != INVALID_PIPE)
732 pipes &= ~(1 << intel_dp->active_pipe);
739 return ffs(pipes) - 1;
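/*
 * Note on the return above: ffs() is 1-based, so "ffs(pipes) - 1" maps the
 * lowest remaining bit back to its pipe enum, e.g. pipes = BIT(PIPE_B) only
 * yields PIPE_B.
 */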
743 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
745 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
746 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
749 lockdep_assert_held(&dev_priv->pps_mutex);
751 /* We should never land here with regular DP ports */
752 WARN_ON(!intel_dp_is_edp(intel_dp));
754 WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
755 intel_dp->active_pipe != intel_dp->pps_pipe);
757 if (intel_dp->pps_pipe != INVALID_PIPE)
758 return intel_dp->pps_pipe;
760 pipe = vlv_find_free_pps(dev_priv);
763 * Didn't find one. This should not happen since there
764 * are two power sequencers and up to two eDP ports.
766 if (WARN_ON(pipe == INVALID_PIPE))
769 vlv_steal_power_sequencer(dev_priv, pipe);
770 intel_dp->pps_pipe = pipe;
772 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
773 pipe_name(intel_dp->pps_pipe),
774 port_name(intel_dig_port->base.port));
776 /* init power sequencer on this pipe and port */
777 intel_dp_init_panel_power_sequencer(intel_dp);
778 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
781 * Even vdd force doesn't work until we've made
782 * the power sequencer lock in on the port.
784 vlv_power_sequencer_kick(intel_dp);
786 return intel_dp->pps_pipe;
790 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
792 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
793 int backlight_controller = dev_priv->vbt.backlight.controller;
795 lockdep_assert_held(&dev_priv->pps_mutex);
797 /* We should never land here with regular DP ports */
798 WARN_ON(!intel_dp_is_edp(intel_dp));
800 if (!intel_dp->pps_reset)
801 return backlight_controller;
803 intel_dp->pps_reset = false;
806 * Only the HW needs to be reprogrammed, the SW state is fixed and
807 * has been setup during connector init.
809 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
811 return backlight_controller;
814 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
817 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
820 return I915_READ(PP_STATUS(pipe)) & PP_ON;
823 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
826 return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
829 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
836 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
838 vlv_pipe_check pipe_check)
842 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
843 u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
844 PANEL_PORT_SELECT_MASK;
846 if (port_sel != PANEL_PORT_SELECT_VLV(port))
849 if (!pipe_check(dev_priv, pipe))
859 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
861 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
862 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
863 enum port port = intel_dig_port->base.port;
865 lockdep_assert_held(&dev_priv->pps_mutex);
867 /* try to find a pipe with this port selected */
868 /* first pick one where the panel is on */
869 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
871 /* didn't find one? pick one where vdd is on */
872 if (intel_dp->pps_pipe == INVALID_PIPE)
873 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
874 vlv_pipe_has_vdd_on);
875 /* didn't find one? pick one with just the correct port */
876 if (intel_dp->pps_pipe == INVALID_PIPE)
877 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
880 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
881 if (intel_dp->pps_pipe == INVALID_PIPE) {
882 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
887 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
888 port_name(port), pipe_name(intel_dp->pps_pipe));
890 intel_dp_init_panel_power_sequencer(intel_dp);
891 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
894 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
896 struct intel_encoder *encoder;
898 if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
899 !IS_GEN9_LP(dev_priv)))
903 * We can't grab pps_mutex here due to deadlock with power_domain
904 * mutex when power_domain functions are called while holding pps_mutex.
905 * That also means that in order to use pps_pipe the code needs to
906 * hold both a power domain reference and pps_mutex, and the power domain
907 * reference get/put must be done while _not_ holding pps_mutex.
908 * pps_{lock,unlock}() do these steps in the correct order, so one
909 * should use them always.
912 for_each_intel_dp(&dev_priv->drm, encoder) {
913 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
915 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
917 if (encoder->type != INTEL_OUTPUT_EDP)
920 if (IS_GEN9_LP(dev_priv))
921 intel_dp->pps_reset = true;
923 intel_dp->pps_pipe = INVALID_PIPE;
927 struct pps_registers {
935 static void intel_pps_get_registers(struct intel_dp *intel_dp,
936 struct pps_registers *regs)
938 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
941 memset(regs, 0, sizeof(*regs));
943 if (IS_GEN9_LP(dev_priv))
944 pps_idx = bxt_power_sequencer_idx(intel_dp);
945 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
946 pps_idx = vlv_power_sequencer_pipe(intel_dp);
948 regs->pp_ctrl = PP_CONTROL(pps_idx);
949 regs->pp_stat = PP_STATUS(pps_idx);
950 regs->pp_on = PP_ON_DELAYS(pps_idx);
951 regs->pp_off = PP_OFF_DELAYS(pps_idx);
953 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
954 if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
955 regs->pp_div = INVALID_MMIO_REG;
957 regs->pp_div = PP_DIVISOR(pps_idx);
961 _pp_ctrl_reg(struct intel_dp *intel_dp)
963 struct pps_registers regs;
965 intel_pps_get_registers(intel_dp, ®s);
971 _pp_stat_reg(struct intel_dp *intel_dp)
973 struct pps_registers regs;
975 intel_pps_get_registers(intel_dp, ®s);
/*
 * Reboot notifier handler to shut down panel power, to guarantee T12 timing.
 * This is only applicable when the panel PM state is not to be tracked.
 */
982 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
985 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
987 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
988 intel_wakeref_t wakeref;
990 if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
993 with_pps_lock(intel_dp, wakeref) {
994 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
995 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
996 i915_reg_t pp_ctrl_reg, pp_div_reg;
999 pp_ctrl_reg = PP_CONTROL(pipe);
1000 pp_div_reg = PP_DIVISOR(pipe);
1001 pp_div = I915_READ(pp_div_reg);
1002 pp_div &= PP_REFERENCE_DIVIDER_MASK;
1004 /* 0x1F write to PP_DIV_REG sets max cycle delay */
1005 I915_WRITE(pp_div_reg, pp_div | 0x1F);
1006 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
1007 msleep(intel_dp->panel_power_cycle_delay);
1014 static bool edp_have_panel_power(struct intel_dp *intel_dp)
1016 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1018 lockdep_assert_held(&dev_priv->pps_mutex);
1020 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1021 intel_dp->pps_pipe == INVALID_PIPE)
1024 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
1027 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
1029 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1031 lockdep_assert_held(&dev_priv->pps_mutex);
1033 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1034 intel_dp->pps_pipe == INVALID_PIPE)
1037 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
1041 intel_dp_check_edp(struct intel_dp *intel_dp)
1043 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1045 if (!intel_dp_is_edp(intel_dp))
1048 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
1049 WARN(1, "eDP powered off while attempting aux channel communication.\n");
1050 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
1051 I915_READ(_pp_stat_reg(intel_dp)),
1052 I915_READ(_pp_ctrl_reg(intel_dp)));
1057 intel_dp_aux_wait_done(struct intel_dp *intel_dp)
1059 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1060 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
1064 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
1065 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
1066 msecs_to_jiffies_timeout(10));
1068 /* just trace the final value */
1069 trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
1072 DRM_ERROR("dp aux hw did not signal timeout!\n");
1078 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1080 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1086 * The clock divider is based off the hrawclk, and would like to run at
1087 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
1089 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
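/*
 * Example for g4x_get_aux_clock_divider() above (informational): with a
 * 100 MHz hrawclk (rawclk_freq = 100000 kHz) this returns
 * DIV_ROUND_CLOSEST(100000, 2000) = 50, i.e. a divider giving the desired
 * ~2 MHz AUX bit clock.
 */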
1092 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1094 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1095 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1101 * The clock divider is based off the cdclk or PCH rawclk, and would
1102 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
1103 * divide by 2000 and use that
1105 if (dig_port->aux_ch == AUX_CH_A)
1106 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
1108 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
1111 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1113 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1114 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1116 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1117 /* Workaround for non-ULT HSW */
1125 return ilk_get_aux_clock_divider(intel_dp, index);
1128 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* SKL doesn't need us to program the AUX clock divider (Hardware will
* derive the clock from CDCLK automatically). We still implement the
* get_aux_clock_divider vfunc to plug into the existing code.
1135 return index ? 0 : 1;
1138 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1140 u32 aux_clock_divider)
1142 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1143 struct drm_i915_private *dev_priv =
1144 to_i915(intel_dig_port->base.base.dev);
1145 u32 precharge, timeout;
1147 if (IS_GEN(dev_priv, 6))
1152 if (IS_BROADWELL(dev_priv))
1153 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1155 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1157 return DP_AUX_CH_CTL_SEND_BUSY |
1158 DP_AUX_CH_CTL_DONE |
1159 DP_AUX_CH_CTL_INTERRUPT |
1160 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1162 DP_AUX_CH_CTL_RECEIVE_ERROR |
1163 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1164 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1165 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1168 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1172 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1175 ret = DP_AUX_CH_CTL_SEND_BUSY |
1176 DP_AUX_CH_CTL_DONE |
1177 DP_AUX_CH_CTL_INTERRUPT |
1178 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1179 DP_AUX_CH_CTL_TIME_OUT_MAX |
1180 DP_AUX_CH_CTL_RECEIVE_ERROR |
1181 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1182 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1183 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1185 if (intel_dig_port->tc_type == TC_PORT_TBT)
1186 ret |= DP_AUX_CH_CTL_TBT_IO;
1192 intel_dp_aux_xfer(struct intel_dp *intel_dp,
1193 const u8 *send, int send_bytes,
1194 u8 *recv, int recv_size,
1195 u32 aux_send_ctl_flags)
1197 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1198 struct drm_i915_private *dev_priv =
1199 to_i915(intel_dig_port->base.base.dev);
1200 i915_reg_t ch_ctl, ch_data[5];
1201 u32 aux_clock_divider;
1202 intel_wakeref_t wakeref;
1203 int i, ret, recv_bytes;
1208 ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
1209 for (i = 0; i < ARRAY_SIZE(ch_data); i++)
1210 ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
1212 wakeref = pps_lock(intel_dp);
* We will be called with VDD already enabled for dpcd/edid/oui reads.
* In such cases we want to leave VDD enabled and it's up to upper layers
* to turn it off. But for e.g. i2c-dev access we need to turn it on/off
* ourselves.
1220 vdd = edp_panel_vdd_on(intel_dp);
1222 /* dp aux is extremely sensitive to irq latency, hence request the
1223 * lowest possible wakeup latency and so prevent the cpu from going into
1224 * deep sleep states.
1226 pm_qos_update_request(&dev_priv->pm_qos, 0);
1228 intel_dp_check_edp(intel_dp);
1230 /* Try to wait for any previous AUX channel activity */
1231 for (try = 0; try < 3; try++) {
1232 status = I915_READ_NOTRACE(ch_ctl);
1233 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
1237 /* just trace the final value */
1238 trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
1241 static u32 last_status = -1;
1242 const u32 status = I915_READ(ch_ctl);
1244 if (status != last_status) {
1245 WARN(1, "dp_aux_ch not started status 0x%08x\n",
1247 last_status = status;
1254 /* Only 5 data registers! */
1255 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
1260 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
1261 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
1265 send_ctl |= aux_send_ctl_flags;
1267 /* Must try at least 3 times according to DP spec */
1268 for (try = 0; try < 5; try++) {
1269 /* Load the send data into the aux channel data registers */
1270 for (i = 0; i < send_bytes; i += 4)
1271 I915_WRITE(ch_data[i >> 2],
1272 intel_dp_pack_aux(send + i,
1275 /* Send the command and wait for it to complete */
1276 I915_WRITE(ch_ctl, send_ctl);
1278 status = intel_dp_aux_wait_done(intel_dp);
1280 /* Clear done status and any errors */
1283 DP_AUX_CH_CTL_DONE |
1284 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1285 DP_AUX_CH_CTL_RECEIVE_ERROR);
1287 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
1288 * 400us delay required for errors and timeouts
1289 * Timeout errors from the HW already meet this
1290 * requirement so skip to next iteration
1292 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
1295 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1296 usleep_range(400, 500);
1299 if (status & DP_AUX_CH_CTL_DONE)
1304 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1305 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
1311 /* Check for timeout or receive error.
1312 * Timeouts occur when the sink is not connected
1314 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1315 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
1320 /* Timeouts occur when the device isn't connected, so they're
1321 * "normal" -- don't fill the kernel log with these */
1322 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
1323 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
1328 /* Unload any bytes sent back from the other side */
1329 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
1330 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
* By BSpec: "Message sizes of 0 or >20 are not allowed."
* We have no idea what happened, so we return -EBUSY so the
* drm layer takes care of the necessary retries.
1337 if (recv_bytes == 0 || recv_bytes > 20) {
1338 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
1344 if (recv_bytes > recv_size)
1345 recv_bytes = recv_size;
1347 for (i = 0; i < recv_bytes; i += 4)
1348 intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
1349 recv + i, recv_bytes - i);
1353 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
1356 edp_panel_vdd_off(intel_dp, false);
1358 pps_unlock(intel_dp, wakeref);
1363 #define BARE_ADDRESS_SIZE 3
1364 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
1367 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
1368 const struct drm_dp_aux_msg *msg)
1370 txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
1371 txbuf[1] = (msg->address >> 8) & 0xff;
1372 txbuf[2] = msg->address & 0xff;
1373 txbuf[3] = msg->size - 1;
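/*
 * Example for intel_dp_aux_header() above (informational): a native AUX read
 * (request 0x9) of two bytes at DPCD address 0x00200 produces the header
 * {0x90, 0x02, 0x00, 0x01}: request/address-high, address-mid, address-low,
 * and length minus one.
 */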
1377 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1379 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
1380 u8 txbuf[20], rxbuf[20];
1381 size_t txsize, rxsize;
1384 intel_dp_aux_header(txbuf, msg);
1386 switch (msg->request & ~DP_AUX_I2C_MOT) {
1387 case DP_AUX_NATIVE_WRITE:
1388 case DP_AUX_I2C_WRITE:
1389 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
1390 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
1391 rxsize = 2; /* 0 or 1 data bytes */
1393 if (WARN_ON(txsize > 20))
1396 WARN_ON(!msg->buffer != !msg->size);
1399 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
1401 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1404 msg->reply = rxbuf[0] >> 4;
1407 /* Number of bytes written in a short write. */
1408 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1410 /* Return payload size. */
1416 case DP_AUX_NATIVE_READ:
1417 case DP_AUX_I2C_READ:
1418 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1419 rxsize = msg->size + 1;
1421 if (WARN_ON(rxsize > 20))
1424 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1427 msg->reply = rxbuf[0] >> 4;
1429 * Assume happy day, and copy the data. The caller is
1430 * expected to check msg->reply before touching it.
1432 * Return payload size.
1435 memcpy(msg->buffer, rxbuf + 1, ret);
1448 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1450 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1451 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1452 enum aux_ch aux_ch = dig_port->aux_ch;
1458 return DP_AUX_CH_CTL(aux_ch);
1460 MISSING_CASE(aux_ch);
1461 return DP_AUX_CH_CTL(AUX_CH_B);
1465 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1467 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1468 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1469 enum aux_ch aux_ch = dig_port->aux_ch;
1475 return DP_AUX_CH_DATA(aux_ch, index);
1477 MISSING_CASE(aux_ch);
1478 return DP_AUX_CH_DATA(AUX_CH_B, index);
1482 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1484 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1485 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1486 enum aux_ch aux_ch = dig_port->aux_ch;
1490 return DP_AUX_CH_CTL(aux_ch);
1494 return PCH_DP_AUX_CH_CTL(aux_ch);
1496 MISSING_CASE(aux_ch);
1497 return DP_AUX_CH_CTL(AUX_CH_A);
1501 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1503 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1504 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1505 enum aux_ch aux_ch = dig_port->aux_ch;
1509 return DP_AUX_CH_DATA(aux_ch, index);
1513 return PCH_DP_AUX_CH_DATA(aux_ch, index);
1515 MISSING_CASE(aux_ch);
1516 return DP_AUX_CH_DATA(AUX_CH_A, index);
1520 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1522 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1523 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1524 enum aux_ch aux_ch = dig_port->aux_ch;
1533 return DP_AUX_CH_CTL(aux_ch);
1535 MISSING_CASE(aux_ch);
1536 return DP_AUX_CH_CTL(AUX_CH_A);
1540 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1542 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1543 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1544 enum aux_ch aux_ch = dig_port->aux_ch;
1553 return DP_AUX_CH_DATA(aux_ch, index);
1555 MISSING_CASE(aux_ch);
1556 return DP_AUX_CH_DATA(AUX_CH_A, index);
1561 intel_dp_aux_fini(struct intel_dp *intel_dp)
1563 kfree(intel_dp->aux.name);
1567 intel_dp_aux_init(struct intel_dp *intel_dp)
1569 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1570 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1571 struct intel_encoder *encoder = &dig_port->base;
1573 if (INTEL_GEN(dev_priv) >= 9) {
1574 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1575 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1576 } else if (HAS_PCH_SPLIT(dev_priv)) {
1577 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1578 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1580 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1581 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1584 if (INTEL_GEN(dev_priv) >= 9)
1585 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1586 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1587 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1588 else if (HAS_PCH_SPLIT(dev_priv))
1589 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1591 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
1593 if (INTEL_GEN(dev_priv) >= 9)
1594 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1596 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
1598 drm_dp_aux_init(&intel_dp->aux);
1600 /* Failure to allocate our preferred name is not critical */
1601 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
1602 port_name(encoder->port));
1603 intel_dp->aux.transfer = intel_dp_aux_transfer;
1606 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1608 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1610 return max_rate >= 540000;
1613 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1615 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1617 return max_rate >= 810000;
1621 intel_dp_set_clock(struct intel_encoder *encoder,
1622 struct intel_crtc_state *pipe_config)
1624 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1625 const struct dp_link_dpll *divisor = NULL;
1628 if (IS_G4X(dev_priv)) {
1630 count = ARRAY_SIZE(g4x_dpll);
1631 } else if (HAS_PCH_SPLIT(dev_priv)) {
1633 count = ARRAY_SIZE(pch_dpll);
1634 } else if (IS_CHERRYVIEW(dev_priv)) {
1636 count = ARRAY_SIZE(chv_dpll);
1637 } else if (IS_VALLEYVIEW(dev_priv)) {
1639 count = ARRAY_SIZE(vlv_dpll);
1642 if (divisor && count) {
1643 for (i = 0; i < count; i++) {
1644 if (pipe_config->port_clock == divisor[i].clock) {
1645 pipe_config->dpll = divisor[i].dpll;
1646 pipe_config->clock_set = true;
1653 static void snprintf_int_array(char *str, size_t len,
1654 const int *array, int nelem)
1660 for (i = 0; i < nelem; i++) {
1661 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1669 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1671 char str[128]; /* FIXME: too big for stack? */
1673 if ((drm_debug & DRM_UT_KMS) == 0)
1676 snprintf_int_array(str, sizeof(str),
1677 intel_dp->source_rates, intel_dp->num_source_rates);
1678 DRM_DEBUG_KMS("source rates: %s\n", str);
1680 snprintf_int_array(str, sizeof(str),
1681 intel_dp->sink_rates, intel_dp->num_sink_rates);
1682 DRM_DEBUG_KMS("sink rates: %s\n", str);
1684 snprintf_int_array(str, sizeof(str),
1685 intel_dp->common_rates, intel_dp->num_common_rates);
1686 DRM_DEBUG_KMS("common rates: %s\n", str);
1690 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1694 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1695 if (WARN_ON(len <= 0))
1698 return intel_dp->common_rates[len - 1];
1701 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1703 int i = intel_dp_rate_index(intel_dp->sink_rates,
1704 intel_dp->num_sink_rates, rate);
1712 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1713 u8 *link_bw, u8 *rate_select)
1715 /* eDP 1.4 rate select method. */
1716 if (intel_dp->use_rate_select) {
1719 intel_dp_rate_select(intel_dp, port_clock);
1721 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
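/*
 * Informational: drm_dp_link_rate_to_bw_code() is link_rate / 27000, so the
 * standard rates map to 162000 -> 0x06, 270000 -> 0x0a, 540000 -> 0x14 and
 * 810000 -> 0x1e (DP_LINK_BW_1_62 .. DP_LINK_BW_8_1).
 */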
1726 struct link_config_limits {
1727 int min_clock, max_clock;
1728 int min_lane_count, max_lane_count;
1729 int min_bpp, max_bpp;
1732 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1733 const struct intel_crtc_state *pipe_config)
1735 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1737 return INTEL_GEN(dev_priv) >= 11 &&
1738 pipe_config->cpu_transcoder != TRANSCODER_A;
1741 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1742 const struct intel_crtc_state *pipe_config)
1744 return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1745 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1748 static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
1749 const struct intel_crtc_state *pipe_config)
1751 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1753 return INTEL_GEN(dev_priv) >= 10 &&
1754 pipe_config->cpu_transcoder != TRANSCODER_A;
1757 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1758 const struct intel_crtc_state *pipe_config)
1760 if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
1763 return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
1764 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1767 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1768 struct intel_crtc_state *pipe_config)
1770 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1771 struct intel_connector *intel_connector = intel_dp->attached_connector;
1774 bpp = pipe_config->pipe_bpp;
1775 bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1778 bpp = min(bpp, 3*bpc);
1780 if (intel_dp_is_edp(intel_dp)) {
/* Get bpp from vbt only for panels that don't have bpp in edid */
1782 if (intel_connector->base.display_info.bpc == 0 &&
1783 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1784 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1785 dev_priv->vbt.edp.bpp);
1786 bpp = dev_priv->vbt.edp.bpp;
1793 /* Adjust link config limits based on compliance test requests. */
1795 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1796 struct intel_crtc_state *pipe_config,
1797 struct link_config_limits *limits)
1799 /* For DP Compliance we override the computed bpp for the pipe */
1800 if (intel_dp->compliance.test_data.bpc != 0) {
1801 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1803 limits->min_bpp = limits->max_bpp = bpp;
1804 pipe_config->dither_force_disable = bpp == 6 * 3;
1806 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
1809 /* Use values requested by Compliance Test Request */
1810 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1813 /* Validate the compliance test data since max values
1814 * might have changed due to link train fallback.
1816 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1817 intel_dp->compliance.test_lane_count)) {
1818 index = intel_dp_rate_index(intel_dp->common_rates,
1819 intel_dp->num_common_rates,
1820 intel_dp->compliance.test_link_rate);
1822 limits->min_clock = limits->max_clock = index;
1823 limits->min_lane_count = limits->max_lane_count =
1824 intel_dp->compliance.test_lane_count;
1829 /* Optimize link config in order: max bpp, min clock, min lanes */
1831 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1832 struct intel_crtc_state *pipe_config,
1833 const struct link_config_limits *limits)
1835 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1836 int bpp, clock, lane_count;
1837 int mode_rate, link_clock, link_avail;
1839 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1840 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1843 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1844 for (lane_count = limits->min_lane_count;
1845 lane_count <= limits->max_lane_count;
1847 link_clock = intel_dp->common_rates[clock];
1848 link_avail = intel_dp_max_data_rate(link_clock,
1851 if (mode_rate <= link_avail) {
1852 pipe_config->lane_count = lane_count;
1853 pipe_config->pipe_bpp = bpp;
1854 pipe_config->port_clock = link_clock;
1865 /* Optimize link config in order: max bpp, min lanes, min clock */
1867 intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
1868 struct intel_crtc_state *pipe_config,
1869 const struct link_config_limits *limits)
1871 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1872 int bpp, clock, lane_count;
1873 int mode_rate, link_clock, link_avail;
1875 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1876 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1879 for (lane_count = limits->min_lane_count;
1880 lane_count <= limits->max_lane_count;
1882 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1883 link_clock = intel_dp->common_rates[clock];
1884 link_avail = intel_dp_max_data_rate(link_clock,
1887 if (mode_rate <= link_avail) {
1888 pipe_config->lane_count = lane_count;
1889 pipe_config->pipe_bpp = bpp;
1890 pipe_config->port_clock = link_clock;
1901 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1904 u8 dsc_bpc[3] = {0};
1906 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1908 for (i = 0; i < num_bpc; i++) {
1909 if (dsc_max_bpc >= dsc_bpc[i])
1910 return dsc_bpc[i] * 3;
1916 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
1917 struct intel_crtc_state *pipe_config,
1918 struct drm_connector_state *conn_state,
1919 struct link_config_limits *limits)
1921 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1922 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1923 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1928 if (!intel_dp_supports_dsc(intel_dp, pipe_config))
1931 dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
1932 conn_state->max_requested_bpc);
1934 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
1935 if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
1936 DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
1941 * For now enable DSC for max bpp, max link rate, max lane count.
1942 * Optimize this later for the minimum possible link rate/lane count
1943 * with DSC enabled for the requested mode.
1945 pipe_config->pipe_bpp = pipe_bpp;
1946 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
1947 pipe_config->lane_count = limits->max_lane_count;
1949 if (intel_dp_is_edp(intel_dp)) {
1950 pipe_config->dsc_params.compressed_bpp =
1951 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
1952 pipe_config->pipe_bpp);
1953 pipe_config->dsc_params.slice_count =
1954 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
1957 u16 dsc_max_output_bpp;
1958 u8 dsc_dp_slice_count;
1960 dsc_max_output_bpp =
1961 intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
1962 pipe_config->lane_count,
1963 adjusted_mode->crtc_clock,
1964 adjusted_mode->crtc_hdisplay);
1965 dsc_dp_slice_count =
1966 intel_dp_dsc_get_slice_count(intel_dp,
1967 adjusted_mode->crtc_clock,
1968 adjusted_mode->crtc_hdisplay);
1969 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
1970 DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
1973 pipe_config->dsc_params.compressed_bpp = min_t(u16,
1974 dsc_max_output_bpp >> 4,
1975 pipe_config->pipe_bpp);
1976 pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
1979 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
1980 * is greater than the maximum Cdclock and if slice count is even
1981 * then we need to use 2 VDSC instances.
1983 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
1984 if (pipe_config->dsc_params.slice_count > 1) {
1985 pipe_config->dsc_params.dsc_split = true;
1987 DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
1992 ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
1994 DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
1995 "Compressed BPP = %d\n",
1996 pipe_config->pipe_bpp,
1997 pipe_config->dsc_params.compressed_bpp);
2001 pipe_config->dsc_params.compression_enable = true;
2002 DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
2003 "Compressed Bpp = %d Slice Count = %d\n",
2004 pipe_config->pipe_bpp,
2005 pipe_config->dsc_params.compressed_bpp,
2006 pipe_config->dsc_params.slice_count);
2012 intel_dp_compute_link_config(struct intel_encoder *encoder,
2013 struct intel_crtc_state *pipe_config,
2014 struct drm_connector_state *conn_state)
2016 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2017 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2018 struct link_config_limits limits;
2022 common_len = intel_dp_common_len_rate_limit(intel_dp,
2023 intel_dp->max_link_rate);
2025 /* No common link rates between source and sink */
2026 WARN_ON(common_len <= 0);
2028 limits.min_clock = 0;
2029 limits.max_clock = common_len - 1;
2031 limits.min_lane_count = 1;
2032 limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2034 limits.min_bpp = 6 * 3;
2035 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2037 if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
2039 * Use the maximum clock and number of lanes the eDP panel
2040 * advertizes being capable of. The eDP 1.3 and earlier panels
2041 * are generally designed to support only a single clock and
2042 * lane configuration, and typically these values correspond to
2043 * the native resolution of the panel. With eDP 1.4 rate select
2044 * and DSC, this is decreasingly the case, and we need to be
2045 * able to select less than maximum link config.
2047 limits.min_lane_count = limits.max_lane_count;
2048 limits.min_clock = limits.max_clock;
2051 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2053 DRM_DEBUG_KMS("DP link computation with max lane count %i "
2054 "max rate %d max bpp %d pixel clock %iKHz\n",
2055 limits.max_lane_count,
2056 intel_dp->common_rates[limits.max_clock],
2057 limits.max_bpp, adjusted_mode->crtc_clock);
2059 if (intel_dp_is_edp(intel_dp))
2061 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
2062 * section A.1: "It is recommended that the minimum number of
2063 * lanes be used, using the minimum link rate allowed for that
2064 * lane configuration."
2066 * Note that we use the max clock and lane count for eDP 1.3 and
2067 * earlier, and fast vs. wide is irrelevant.
2069 ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
2072 /* Optimize for slow and wide. */
2073 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
2076 /* enable compression if the mode doesn't fit available BW */
2077 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2078 if (ret || intel_dp->force_dsc_en) {
2079 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2080 conn_state, &limits);
2085 if (pipe_config->dsc_params.compression_enable) {
2086 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2087 pipe_config->lane_count, pipe_config->port_clock,
2088 pipe_config->pipe_bpp,
2089 pipe_config->dsc_params.compressed_bpp);
2091 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2092 intel_dp_link_required(adjusted_mode->crtc_clock,
2093 pipe_config->dsc_params.compressed_bpp),
2094 intel_dp_max_data_rate(pipe_config->port_clock,
2095 pipe_config->lane_count));
2097 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2098 pipe_config->lane_count, pipe_config->port_clock,
2099 pipe_config->pipe_bpp);
2101 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2102 intel_dp_link_required(adjusted_mode->crtc_clock,
2103 pipe_config->pipe_bpp),
2104 intel_dp_max_data_rate(pipe_config->port_clock,
2105 pipe_config->lane_count));
2110 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2111 const struct drm_connector_state *conn_state)
2113 const struct intel_digital_connector_state *intel_conn_state =
2114 to_intel_digital_connector_state(conn_state);
2115 const struct drm_display_mode *adjusted_mode =
2116 &crtc_state->base.adjusted_mode;
2118 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2121 * CEA-861-E - 5.1 Default Encoding Parameters
2122 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2124 return crtc_state->pipe_bpp != 18 &&
2125 drm_default_rgb_quant_range(adjusted_mode) ==
2126 HDMI_QUANTIZATION_RANGE_LIMITED;
2128 return intel_conn_state->broadcast_rgb ==
2129 INTEL_BROADCAST_RGB_LIMITED;
2134 intel_dp_compute_config(struct intel_encoder *encoder,
2135 struct intel_crtc_state *pipe_config,
2136 struct drm_connector_state *conn_state)
2138 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2139 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2140 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2141 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2142 enum port port = encoder->port;
2143 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2144 struct intel_connector *intel_connector = intel_dp->attached_connector;
2145 struct intel_digital_connector_state *intel_conn_state =
2146 to_intel_digital_connector_state(conn_state);
2147 bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
2148 DP_DPCD_QUIRK_CONSTANT_N);
2151 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2152 pipe_config->has_pch_encoder = true;
2154 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2156 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
2158 pipe_config->has_drrs = false;
2159 if (IS_G4X(dev_priv) || port == PORT_A)
2160 pipe_config->has_audio = false;
2161 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2162 pipe_config->has_audio = intel_dp->has_audio;
2164 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2166 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2167 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2170 if (INTEL_GEN(dev_priv) >= 9) {
2171 ret = skl_update_scaler_crtc(pipe_config);
2176 if (HAS_GMCH(dev_priv))
2177 intel_gmch_panel_fitting(intel_crtc, pipe_config,
2178 conn_state->scaling_mode);
2180 intel_pch_panel_fitting(intel_crtc, pipe_config,
2181 conn_state->scaling_mode);
2184 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2187 if (HAS_GMCH(dev_priv) &&
2188 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2191 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2194 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
2195 intel_dp_supports_fec(intel_dp, pipe_config);
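	/*
	 * FEC is required when DSC is used on an external DP link (DP 1.4),
	 * but not on eDP, hence the !intel_dp_is_edp() check above.
	 */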
2197 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2201 pipe_config->limited_color_range =
2202 intel_dp_limited_color_range(pipe_config, conn_state);
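	/*
	 * The data M/N values computed below roughly encode the ratio of
	 * stream bandwidth (crtc_clock * bpp) to link bandwidth
	 * (port_clock * lane_count); with DSC enabled the compressed bpp
	 * is used in place of pipe_bpp.
	 */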
2204 if (!pipe_config->dsc_params.compression_enable)
2205 intel_link_compute_m_n(pipe_config->pipe_bpp,
2206 pipe_config->lane_count,
2207 adjusted_mode->crtc_clock,
2208 pipe_config->port_clock,
2209 &pipe_config->dp_m_n,
2212 intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
2213 pipe_config->lane_count,
2214 adjusted_mode->crtc_clock,
2215 pipe_config->port_clock,
2216 &pipe_config->dp_m_n,
2219 if (intel_connector->panel.downclock_mode != NULL &&
2220 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2221 pipe_config->has_drrs = true;
2222 intel_link_compute_m_n(pipe_config->pipe_bpp,
2223 pipe_config->lane_count,
2224 intel_connector->panel.downclock_mode->clock,
2225 pipe_config->port_clock,
2226 &pipe_config->dp_m2_n2,
2230 if (!HAS_DDI(dev_priv))
2231 intel_dp_set_clock(encoder, pipe_config);
2233 intel_psr_compute_config(intel_dp, pipe_config);
2238 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2239 int link_rate, u8 lane_count,
2242 intel_dp->link_trained = false;
2243 intel_dp->link_rate = link_rate;
2244 intel_dp->lane_count = lane_count;
2245 intel_dp->link_mst = link_mst;
2248 static void intel_dp_prepare(struct intel_encoder *encoder,
2249 const struct intel_crtc_state *pipe_config)
2251 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2252 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2253 enum port port = encoder->port;
2254 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2255 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2257 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2258 pipe_config->lane_count,
2259 intel_crtc_has_type(pipe_config,
2260 INTEL_OUTPUT_DP_MST));
2263 * There are four kinds of DP registers:
2270 * IBX PCH and CPU are the same for almost everything,
2271 * except that the CPU DP PLL is configured in this
2274 * CPT PCH is quite different, having many bits moved
2275 * to the TRANS_DP_CTL register instead. That
2276 * configuration happens (oddly) in ironlake_pch_enable
2279 /* Preserve the BIOS-computed detected bit. This is
2280 * supposed to be read-only.
2282 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2284 /* Handle DP bits in common between all three register formats */
2285 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2286 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2288 /* Split out the IBX/CPU vs CPT settings */
2290 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2291 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2292 intel_dp->DP |= DP_SYNC_HS_HIGH;
2293 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2294 intel_dp->DP |= DP_SYNC_VS_HIGH;
2295 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2297 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2298 intel_dp->DP |= DP_ENHANCED_FRAMING;
2300 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2301 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2304 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2306 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2307 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2308 trans_dp |= TRANS_DP_ENH_FRAMING;
2310 trans_dp &= ~TRANS_DP_ENH_FRAMING;
2311 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2313 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2314 intel_dp->DP |= DP_COLOR_RANGE_16_235;
2316 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2317 intel_dp->DP |= DP_SYNC_HS_HIGH;
2318 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2319 intel_dp->DP |= DP_SYNC_VS_HIGH;
2320 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2322 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2323 intel_dp->DP |= DP_ENHANCED_FRAMING;
2325 if (IS_CHERRYVIEW(dev_priv))
2326 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2328 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2332 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
2333 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
2335 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
2336 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
2338 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2339 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
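/*
 * Each MASK/VALUE pair above is consumed by wait_panel_status(): the wait
 * completes once (I915_READ(pp_stat_reg) & mask) == value.
 */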
2341 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2343 static void wait_panel_status(struct intel_dp *intel_dp,
2347 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2348 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2350 lockdep_assert_held(&dev_priv->pps_mutex);
2352 intel_pps_verify_state(intel_dp);
2354 pp_stat_reg = _pp_stat_reg(intel_dp);
2355 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2357 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2359 I915_READ(pp_stat_reg),
2360 I915_READ(pp_ctrl_reg));
2362 if (intel_wait_for_register(&dev_priv->uncore,
2363 pp_stat_reg, mask, value,
2365 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2366 I915_READ(pp_stat_reg),
2367 I915_READ(pp_ctrl_reg));
2369 DRM_DEBUG_KMS("Wait complete\n");
2372 static void wait_panel_on(struct intel_dp *intel_dp)
2374 DRM_DEBUG_KMS("Wait for panel power on\n");
2375 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2378 static void wait_panel_off(struct intel_dp *intel_dp)
2380 DRM_DEBUG_KMS("Wait for panel power off time\n");
2381 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2384 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2386 ktime_t panel_power_on_time;
2387 s64 panel_power_off_duration;
2389 DRM_DEBUG_KMS("Wait for panel power cycle\n");
2391	/* take the difference of current time and panel power off time
2392	 * and then make the panel wait for t11_t12 if needed. */
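	/*
	 * Illustrative example: with a panel_power_cycle_delay (T12) of
	 * 500 ms and a panel powered off 300 ms ago, we wait the remaining
	 * 200 ms before continuing.
	 */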
2393 panel_power_on_time = ktime_get_boottime();
2394 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2396 /* When we disable the VDD override bit last we have to do the manual
2398 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2399 wait_remaining_ms_from_jiffies(jiffies,
2400 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2402 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2405 static void wait_backlight_on(struct intel_dp *intel_dp)
2407 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2408 intel_dp->backlight_on_delay);
2411 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2413 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2414 intel_dp->backlight_off_delay);
2417 /* Read the current pp_control value, unlocking the register if it
2421 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2423 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2426 lockdep_assert_held(&dev_priv->pps_mutex);
2428 control = I915_READ(_pp_ctrl_reg(intel_dp));
2429 if (WARN_ON(!HAS_DDI(dev_priv) &&
2430 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2431 control &= ~PANEL_UNLOCK_MASK;
2432 control |= PANEL_UNLOCK_REGS;
2438 * Must be paired with edp_panel_vdd_off().
2439 * Must hold pps_mutex around the whole on/off sequence.
2440 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2442 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2444 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2445 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2447 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2448 bool need_to_disable = !intel_dp->want_panel_vdd;
2450 lockdep_assert_held(&dev_priv->pps_mutex);
2452 if (!intel_dp_is_edp(intel_dp))
2455 cancel_delayed_work(&intel_dp->panel_vdd_work);
2456 intel_dp->want_panel_vdd = true;
2458 if (edp_have_panel_vdd(intel_dp))
2459 return need_to_disable;
2461 intel_display_power_get(dev_priv,
2462 intel_aux_power_domain(intel_dig_port));
2464 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2465 port_name(intel_dig_port->base.port));
2467 if (!edp_have_panel_power(intel_dp))
2468 wait_panel_power_cycle(intel_dp);
2470 pp = ironlake_get_pp_control(intel_dp);
2471 pp |= EDP_FORCE_VDD;
2473 pp_stat_reg = _pp_stat_reg(intel_dp);
2474 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2476 I915_WRITE(pp_ctrl_reg, pp);
2477 POSTING_READ(pp_ctrl_reg);
2478 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2479 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2481 * If the panel wasn't on, delay before accessing aux channel
2483 if (!edp_have_panel_power(intel_dp)) {
2484 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2485 port_name(intel_dig_port->base.port));
2486 msleep(intel_dp->panel_power_up_delay);
2489 return need_to_disable;
2493 * Must be paired with intel_edp_panel_vdd_off() or
2494 * intel_edp_panel_off().
2495 * Nested calls to these functions are not allowed since
2496 * we drop the lock. Caller must use some higher level
2497 * locking to prevent nested calls from other threads.
2499 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2501 intel_wakeref_t wakeref;
2504 if (!intel_dp_is_edp(intel_dp))
2508 with_pps_lock(intel_dp, wakeref)
2509 vdd = edp_panel_vdd_on(intel_dp);
2510 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2511 port_name(dp_to_dig_port(intel_dp)->base.port));
2514 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2516 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2517 struct intel_digital_port *intel_dig_port =
2518 dp_to_dig_port(intel_dp);
2520 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2522 lockdep_assert_held(&dev_priv->pps_mutex);
2524 WARN_ON(intel_dp->want_panel_vdd);
2526 if (!edp_have_panel_vdd(intel_dp))
2529 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2530 port_name(intel_dig_port->base.port));
2532 pp = ironlake_get_pp_control(intel_dp);
2533 pp &= ~EDP_FORCE_VDD;
2535 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2536 pp_stat_reg = _pp_stat_reg(intel_dp);
2538 I915_WRITE(pp_ctrl_reg, pp);
2539 POSTING_READ(pp_ctrl_reg);
2541 /* Make sure sequencer is idle before allowing subsequent activity */
2542 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2543 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2545 if ((pp & PANEL_POWER_ON) == 0)
2546 intel_dp->panel_power_off_time = ktime_get_boottime();
2548 intel_display_power_put_unchecked(dev_priv,
2549 intel_aux_power_domain(intel_dig_port));
2552 static void edp_panel_vdd_work(struct work_struct *__work)
2554 struct intel_dp *intel_dp =
2555 container_of(to_delayed_work(__work),
2556 struct intel_dp, panel_vdd_work);
2557 intel_wakeref_t wakeref;
2559 with_pps_lock(intel_dp, wakeref) {
2560 if (!intel_dp->want_panel_vdd)
2561 edp_panel_vdd_off_sync(intel_dp);
2565 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2567 unsigned long delay;
2570 * Queue the timer to fire a long time from now (relative to the power
2571 * down delay) to keep the panel power up across a sequence of
2574 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
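	/*
	 * E.g. with an (illustrative) 500 ms power cycle delay the VDD
	 * override is kept asserted for about 2.5 seconds of idle time.
	 */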
2575 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2579 * Must be paired with edp_panel_vdd_on().
2580 * Must hold pps_mutex around the whole on/off sequence.
2581 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2583 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2585 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2587 lockdep_assert_held(&dev_priv->pps_mutex);
2589 if (!intel_dp_is_edp(intel_dp))
2592 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2593 port_name(dp_to_dig_port(intel_dp)->base.port));
2595 intel_dp->want_panel_vdd = false;
2598 edp_panel_vdd_off_sync(intel_dp);
2600 edp_panel_vdd_schedule_off(intel_dp);
2603 static void edp_panel_on(struct intel_dp *intel_dp)
2605 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2607 i915_reg_t pp_ctrl_reg;
2609 lockdep_assert_held(&dev_priv->pps_mutex);
2611 if (!intel_dp_is_edp(intel_dp))
2614 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2615 port_name(dp_to_dig_port(intel_dp)->base.port));
2617 if (WARN(edp_have_panel_power(intel_dp),
2618 "eDP port %c panel power already on\n",
2619 port_name(dp_to_dig_port(intel_dp)->base.port)))
2622 wait_panel_power_cycle(intel_dp);
2624 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2625 pp = ironlake_get_pp_control(intel_dp);
2626 if (IS_GEN(dev_priv, 5)) {
2627 /* ILK workaround: disable reset around power sequence */
2628 pp &= ~PANEL_POWER_RESET;
2629 I915_WRITE(pp_ctrl_reg, pp);
2630 POSTING_READ(pp_ctrl_reg);
2633 pp |= PANEL_POWER_ON;
2634 if (!IS_GEN(dev_priv, 5))
2635 pp |= PANEL_POWER_RESET;
2637 I915_WRITE(pp_ctrl_reg, pp);
2638 POSTING_READ(pp_ctrl_reg);
2640 wait_panel_on(intel_dp);
2641 intel_dp->last_power_on = jiffies;
2643 if (IS_GEN(dev_priv, 5)) {
2644 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2645 I915_WRITE(pp_ctrl_reg, pp);
2646 POSTING_READ(pp_ctrl_reg);
2650 void intel_edp_panel_on(struct intel_dp *intel_dp)
2652 intel_wakeref_t wakeref;
2654 if (!intel_dp_is_edp(intel_dp))
2657 with_pps_lock(intel_dp, wakeref)
2658 edp_panel_on(intel_dp);
2662 static void edp_panel_off(struct intel_dp *intel_dp)
2664 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2665 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2667 i915_reg_t pp_ctrl_reg;
2669 lockdep_assert_held(&dev_priv->pps_mutex);
2671 if (!intel_dp_is_edp(intel_dp))
2674 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2675 port_name(dig_port->base.port));
2677 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2678 port_name(dig_port->base.port));
2680 pp = ironlake_get_pp_control(intel_dp);
2681 /* We need to switch off panel power _and_ force vdd, for otherwise some
2682 * panels get very unhappy and cease to work. */
2683 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2686 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2688 intel_dp->want_panel_vdd = false;
2690 I915_WRITE(pp_ctrl_reg, pp);
2691 POSTING_READ(pp_ctrl_reg);
2693 wait_panel_off(intel_dp);
2694 intel_dp->panel_power_off_time = ktime_get_boottime();
2696 /* We got a reference when we enabled the VDD. */
2697 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2700 void intel_edp_panel_off(struct intel_dp *intel_dp)
2702 intel_wakeref_t wakeref;
2704 if (!intel_dp_is_edp(intel_dp))
2707 with_pps_lock(intel_dp, wakeref)
2708 edp_panel_off(intel_dp);
2711 /* Enable backlight in the panel power control. */
2712 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2714 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2715 intel_wakeref_t wakeref;
2718 * If we enable the backlight right away following a panel power
2719 * on, we may see slight flicker as the panel syncs with the eDP
2720 * link. So delay a bit to make sure the image is solid before
2721 * allowing it to appear.
2723 wait_backlight_on(intel_dp);
2725 with_pps_lock(intel_dp, wakeref) {
2726 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2729 pp = ironlake_get_pp_control(intel_dp);
2730 pp |= EDP_BLC_ENABLE;
2732 I915_WRITE(pp_ctrl_reg, pp);
2733 POSTING_READ(pp_ctrl_reg);
2737 /* Enable backlight PWM and backlight PP control. */
2738 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2739 const struct drm_connector_state *conn_state)
2741 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2743 if (!intel_dp_is_edp(intel_dp))
2746 DRM_DEBUG_KMS("\n");
2748 intel_panel_enable_backlight(crtc_state, conn_state);
2749 _intel_edp_backlight_on(intel_dp);
2752 /* Disable backlight in the panel power control. */
2753 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2755 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2756 intel_wakeref_t wakeref;
2758 if (!intel_dp_is_edp(intel_dp))
2761 with_pps_lock(intel_dp, wakeref) {
2762 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2765 pp = ironlake_get_pp_control(intel_dp);
2766 pp &= ~EDP_BLC_ENABLE;
2768 I915_WRITE(pp_ctrl_reg, pp);
2769 POSTING_READ(pp_ctrl_reg);
2772 intel_dp->last_backlight_off = jiffies;
2773 edp_wait_backlight_off(intel_dp);
2776 /* Disable backlight PP control and backlight PWM. */
2777 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2779 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2781 if (!intel_dp_is_edp(intel_dp))
2784 DRM_DEBUG_KMS("\n");
2786 _intel_edp_backlight_off(intel_dp);
2787 intel_panel_disable_backlight(old_conn_state);
2791 * Hook for controlling the panel power control backlight through the bl_power
2792 * sysfs attribute. Take care to handle multiple calls.
2794 static void intel_edp_backlight_power(struct intel_connector *connector,
2797 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2798 intel_wakeref_t wakeref;
2802 with_pps_lock(intel_dp, wakeref)
2803 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2804 if (is_enabled == enable)
2807 DRM_DEBUG_KMS("panel power control backlight %s\n",
2808 enable ? "enable" : "disable");
2811 _intel_edp_backlight_on(intel_dp);
2813 _intel_edp_backlight_off(intel_dp);
2816 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2818 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2819 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2820 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2822 I915_STATE_WARN(cur_state != state,
2823 "DP port %c state assertion failure (expected %s, current %s)\n",
2824 port_name(dig_port->base.port),
2825 onoff(state), onoff(cur_state));
2827 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2829 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2831 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2833 I915_STATE_WARN(cur_state != state,
2834 "eDP PLL state assertion failure (expected %s, current %s)\n",
2835 onoff(state), onoff(cur_state));
2837 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2838 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2840 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2841 const struct intel_crtc_state *pipe_config)
2843 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2844 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2846 assert_pipe_disabled(dev_priv, crtc->pipe);
2847 assert_dp_port_disabled(intel_dp);
2848 assert_edp_pll_disabled(dev_priv);
2850 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2851 pipe_config->port_clock);
2853 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2855 if (pipe_config->port_clock == 162000)
2856 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2858 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2860 I915_WRITE(DP_A, intel_dp->DP);
2865	 * [DevILK] Workaround required when enabling DP PLL
2866 * while a pipe is enabled going to FDI:
2867 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2868 * 2. Program DP PLL enable
2870 if (IS_GEN(dev_priv, 5))
2871 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
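	/*
	 * Note: ILK has only pipes A and B, so !crtc->pipe above selects
	 * the other (FDI-bound) pipe for the vblank wait.
	 */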
2873 intel_dp->DP |= DP_PLL_ENABLE;
2875 I915_WRITE(DP_A, intel_dp->DP);
2880 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2881 const struct intel_crtc_state *old_crtc_state)
2883 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2884 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2886 assert_pipe_disabled(dev_priv, crtc->pipe);
2887 assert_dp_port_disabled(intel_dp);
2888 assert_edp_pll_enabled(dev_priv);
2890 DRM_DEBUG_KMS("disabling eDP PLL\n");
2892 intel_dp->DP &= ~DP_PLL_ENABLE;
2894 I915_WRITE(DP_A, intel_dp->DP);
2899 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2902 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2903 * be capable of signalling downstream hpd with a long pulse.
2904 * Whether or not that means D3 is safe to use is not clear,
2905 * but let's assume so until proven otherwise.
2907 * FIXME should really check all downstream ports...
2909 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2910 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2911 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2914 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2915 const struct intel_crtc_state *crtc_state,
2920 if (!crtc_state->dsc_params.compression_enable)
2923 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2924 enable ? DP_DECOMPRESSION_EN : 0);
2926 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2927 enable ? "enable" : "disable");
2930 /* If the sink supports it, try to set the power state appropriately */
2931 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2935 /* Should have a valid DPCD by this point */
2936 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2939 if (mode != DRM_MODE_DPMS_ON) {
2940 if (downstream_hpd_needs_d0(intel_dp))
2943 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2946 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2949 * When turning on, we need to retry for 1ms to give the sink
2952 for (i = 0; i < 3; i++) {
2953 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2960 if (ret == 1 && lspcon->active)
2961 lspcon_wait_pcon_mode(lspcon);
2965 DRM_DEBUG_KMS("failed to %s sink power state\n",
2966 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2969 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
2970 enum port port, enum pipe *pipe)
2974 for_each_pipe(dev_priv, p) {
2975 u32 val = I915_READ(TRANS_DP_CTL(p));
2977 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
2983 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
2985 /* must initialize pipe to something for the asserts */
2991 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
2992 i915_reg_t dp_reg, enum port port,
2998 val = I915_READ(dp_reg);
3000 ret = val & DP_PORT_EN;
3002 /* asserts want to know the pipe even if the port is disabled */
3003 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3004 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3005 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3006 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3007 else if (IS_CHERRYVIEW(dev_priv))
3008 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3010 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3015 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3018 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3019 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3020 intel_wakeref_t wakeref;
3023 wakeref = intel_display_power_get_if_enabled(dev_priv,
3024 encoder->power_domain);
3028 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3029 encoder->port, pipe);
3031 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3036 static void intel_dp_get_config(struct intel_encoder *encoder,
3037 struct intel_crtc_state *pipe_config)
3039 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3040 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3042 enum port port = encoder->port;
3043 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3045 if (encoder->type == INTEL_OUTPUT_EDP)
3046 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3048 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3050 tmp = I915_READ(intel_dp->output_reg);
3052 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3054 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3055 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3057 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3058 flags |= DRM_MODE_FLAG_PHSYNC;
3060 flags |= DRM_MODE_FLAG_NHSYNC;
3062 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3063 flags |= DRM_MODE_FLAG_PVSYNC;
3065 flags |= DRM_MODE_FLAG_NVSYNC;
3067 if (tmp & DP_SYNC_HS_HIGH)
3068 flags |= DRM_MODE_FLAG_PHSYNC;
3070 flags |= DRM_MODE_FLAG_NHSYNC;
3072 if (tmp & DP_SYNC_VS_HIGH)
3073 flags |= DRM_MODE_FLAG_PVSYNC;
3075 flags |= DRM_MODE_FLAG_NVSYNC;
3078 pipe_config->base.adjusted_mode.flags |= flags;
3080 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3081 pipe_config->limited_color_range = true;
3083 pipe_config->lane_count =
3084 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3086 intel_dp_get_m_n(crtc, pipe_config);
3088 if (port == PORT_A) {
3089 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3090 pipe_config->port_clock = 162000;
3092 pipe_config->port_clock = 270000;
3095 pipe_config->base.adjusted_mode.crtc_clock =
3096 intel_dotclock_calculate(pipe_config->port_clock,
3097 &pipe_config->dp_m_n);
3099 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3100 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3102 * This is a big fat ugly hack.
3104 * Some machines in UEFI boot mode provide us a VBT that has 18
3105 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3106 * unknown we fail to light up. Yet the same BIOS boots up with
3107 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3108 * max, not what it tells us to use.
3110 * Note: This will still be broken if the eDP panel is not lit
3111 * up by the BIOS, and thus we can't get the mode at module
3114 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3115 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3116 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3120 static void intel_disable_dp(struct intel_encoder *encoder,
3121 const struct intel_crtc_state *old_crtc_state,
3122 const struct drm_connector_state *old_conn_state)
3124 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3126 intel_dp->link_trained = false;
3128 if (old_crtc_state->has_audio)
3129 intel_audio_codec_disable(encoder,
3130 old_crtc_state, old_conn_state);
3132 /* Make sure the panel is off before trying to change the mode. But also
3133 * ensure that we have vdd while we switch off the panel. */
3134 intel_edp_panel_vdd_on(intel_dp);
3135 intel_edp_backlight_off(old_conn_state);
3136 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3137 intel_edp_panel_off(intel_dp);
3140 static void g4x_disable_dp(struct intel_encoder *encoder,
3141 const struct intel_crtc_state *old_crtc_state,
3142 const struct drm_connector_state *old_conn_state)
3144 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3147 static void vlv_disable_dp(struct intel_encoder *encoder,
3148 const struct intel_crtc_state *old_crtc_state,
3149 const struct drm_connector_state *old_conn_state)
3151 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3154 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3155 const struct intel_crtc_state *old_crtc_state,
3156 const struct drm_connector_state *old_conn_state)
3158 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3159 enum port port = encoder->port;
3162 * Bspec does not list a specific disable sequence for g4x DP.
3163 * Follow the ilk+ sequence (disable pipe before the port) for
3164 * g4x DP as it does not suffer from underruns like the normal
3165 * g4x modeset sequence (disable pipe after the port).
3167 intel_dp_link_down(encoder, old_crtc_state);
3169 /* Only ilk+ has port A */
3171 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3174 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3175 const struct intel_crtc_state *old_crtc_state,
3176 const struct drm_connector_state *old_conn_state)
3178 intel_dp_link_down(encoder, old_crtc_state);
3181 static void chv_post_disable_dp(struct intel_encoder *encoder,
3182 const struct intel_crtc_state *old_crtc_state,
3183 const struct drm_connector_state *old_conn_state)
3185 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3187 intel_dp_link_down(encoder, old_crtc_state);
3189 mutex_lock(&dev_priv->sb_lock);
3191 /* Assert data lane reset */
3192 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3194 mutex_unlock(&dev_priv->sb_lock);
3198 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3202 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3203 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3204 enum port port = intel_dig_port->base.port;
3205 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3207 if (dp_train_pat & train_pat_mask)
3208 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3209 dp_train_pat & train_pat_mask);
3211 if (HAS_DDI(dev_priv)) {
3212 u32 temp = I915_READ(DP_TP_CTL(port));
3214 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3215 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3217 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3219 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3220 switch (dp_train_pat & train_pat_mask) {
3221 case DP_TRAINING_PATTERN_DISABLE:
3222 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3225 case DP_TRAINING_PATTERN_1:
3226 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3228 case DP_TRAINING_PATTERN_2:
3229 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3231 case DP_TRAINING_PATTERN_3:
3232 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3234 case DP_TRAINING_PATTERN_4:
3235 temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3238 I915_WRITE(DP_TP_CTL(port), temp);
3240 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3241 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3242 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3244 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3245 case DP_TRAINING_PATTERN_DISABLE:
3246 *DP |= DP_LINK_TRAIN_OFF_CPT;
3248 case DP_TRAINING_PATTERN_1:
3249 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3251 case DP_TRAINING_PATTERN_2:
3252 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3254 case DP_TRAINING_PATTERN_3:
3255 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3256 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3261 *DP &= ~DP_LINK_TRAIN_MASK;
3263 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3264 case DP_TRAINING_PATTERN_DISABLE:
3265 *DP |= DP_LINK_TRAIN_OFF;
3267 case DP_TRAINING_PATTERN_1:
3268 *DP |= DP_LINK_TRAIN_PAT_1;
3270 case DP_TRAINING_PATTERN_2:
3271 *DP |= DP_LINK_TRAIN_PAT_2;
3273 case DP_TRAINING_PATTERN_3:
3274 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3275 *DP |= DP_LINK_TRAIN_PAT_2;
3281 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3282 const struct intel_crtc_state *old_crtc_state)
3284 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3286 /* enable with pattern 1 (as per spec) */
3288 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3291 * Magic for VLV/CHV. We _must_ first set up the register
3292 * without actually enabling the port, and then do another
3293 * write to enable the port. Otherwise link training will
3294 * fail when the power sequencer is freshly used for this port.
3296 intel_dp->DP |= DP_PORT_EN;
3297 if (old_crtc_state->has_audio)
3298 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3300 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3301 POSTING_READ(intel_dp->output_reg);
3304 static void intel_enable_dp(struct intel_encoder *encoder,
3305 const struct intel_crtc_state *pipe_config,
3306 const struct drm_connector_state *conn_state)
3308 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3309 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3310 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3311 u32 dp_reg = I915_READ(intel_dp->output_reg);
3312 enum pipe pipe = crtc->pipe;
3313 intel_wakeref_t wakeref;
3315 if (WARN_ON(dp_reg & DP_PORT_EN))
3318 with_pps_lock(intel_dp, wakeref) {
3319 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3320 vlv_init_panel_power_sequencer(encoder, pipe_config);
3322 intel_dp_enable_port(intel_dp, pipe_config);
3324 edp_panel_vdd_on(intel_dp);
3325 edp_panel_on(intel_dp);
3326 edp_panel_vdd_off(intel_dp, true);
3329 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3330 unsigned int lane_mask = 0x0;
3332 if (IS_CHERRYVIEW(dev_priv))
3333 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3335 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3339 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3340 intel_dp_start_link_train(intel_dp);
3341 intel_dp_stop_link_train(intel_dp);
3343 if (pipe_config->has_audio) {
3344 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3346 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3350 static void g4x_enable_dp(struct intel_encoder *encoder,
3351 const struct intel_crtc_state *pipe_config,
3352 const struct drm_connector_state *conn_state)
3354 intel_enable_dp(encoder, pipe_config, conn_state);
3355 intel_edp_backlight_on(pipe_config, conn_state);
3358 static void vlv_enable_dp(struct intel_encoder *encoder,
3359 const struct intel_crtc_state *pipe_config,
3360 const struct drm_connector_state *conn_state)
3362 intel_edp_backlight_on(pipe_config, conn_state);
3365 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3366 const struct intel_crtc_state *pipe_config,
3367 const struct drm_connector_state *conn_state)
3369 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3370 enum port port = encoder->port;
3372 intel_dp_prepare(encoder, pipe_config);
3374 /* Only ilk+ has port A */
3376 ironlake_edp_pll_on(intel_dp, pipe_config);
3379 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3381 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3382 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3383 enum pipe pipe = intel_dp->pps_pipe;
3384 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3386 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3388 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3391 edp_panel_vdd_off_sync(intel_dp);
3394 * VLV seems to get confused when multiple power sequencers
3395 * have the same port selected (even if only one has power/vdd
3396	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
3397	 * CHV, on the other hand, doesn't seem to mind having the same port
3398 * selected in multiple power sequencers, but let's clear the
3399 * port select always when logically disconnecting a power sequencer
3402 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3403 pipe_name(pipe), port_name(intel_dig_port->base.port));
3404 I915_WRITE(pp_on_reg, 0);
3405 POSTING_READ(pp_on_reg);
3407 intel_dp->pps_pipe = INVALID_PIPE;
3410 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3413 struct intel_encoder *encoder;
3415 lockdep_assert_held(&dev_priv->pps_mutex);
3417 for_each_intel_dp(&dev_priv->drm, encoder) {
3418 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3419 enum port port = encoder->port;
3421 WARN(intel_dp->active_pipe == pipe,
3422 "stealing pipe %c power sequencer from active (e)DP port %c\n",
3423 pipe_name(pipe), port_name(port));
3425 if (intel_dp->pps_pipe != pipe)
3428 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3429 pipe_name(pipe), port_name(port));
3431 /* make sure vdd is off before we steal it */
3432 vlv_detach_power_sequencer(intel_dp);
3436 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3437 const struct intel_crtc_state *crtc_state)
3439 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3440 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3441 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3443 lockdep_assert_held(&dev_priv->pps_mutex);
3445 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3447 if (intel_dp->pps_pipe != INVALID_PIPE &&
3448 intel_dp->pps_pipe != crtc->pipe) {
3450 * If another power sequencer was being used on this
3451	 * port previously, make sure to turn off vdd there while
3452 * we still have control of it.
3454 vlv_detach_power_sequencer(intel_dp);
3458 * We may be stealing the power
3459 * sequencer from another port.
3461 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3463 intel_dp->active_pipe = crtc->pipe;
3465 if (!intel_dp_is_edp(intel_dp))
3468 /* now it's all ours */
3469 intel_dp->pps_pipe = crtc->pipe;
3471 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3472 pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3474 /* init power sequencer on this pipe and port */
3475 intel_dp_init_panel_power_sequencer(intel_dp);
3476 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3479 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3480 const struct intel_crtc_state *pipe_config,
3481 const struct drm_connector_state *conn_state)
3483 vlv_phy_pre_encoder_enable(encoder, pipe_config);
3485 intel_enable_dp(encoder, pipe_config, conn_state);
3488 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3489 const struct intel_crtc_state *pipe_config,
3490 const struct drm_connector_state *conn_state)
3492 intel_dp_prepare(encoder, pipe_config);
3494 vlv_phy_pre_pll_enable(encoder, pipe_config);
3497 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3498 const struct intel_crtc_state *pipe_config,
3499 const struct drm_connector_state *conn_state)
3501 chv_phy_pre_encoder_enable(encoder, pipe_config);
3503 intel_enable_dp(encoder, pipe_config, conn_state);
3505 /* Second common lane will stay alive on its own now */
3506 chv_phy_release_cl2_override(encoder);
3509 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3510 const struct intel_crtc_state *pipe_config,
3511 const struct drm_connector_state *conn_state)
3513 intel_dp_prepare(encoder, pipe_config);
3515 chv_phy_pre_pll_enable(encoder, pipe_config);
3518 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3519 const struct intel_crtc_state *old_crtc_state,
3520 const struct drm_connector_state *old_conn_state)
3522 chv_phy_post_pll_disable(encoder, old_crtc_state);
3526 * Fetch AUX CH registers 0x202 - 0x207 which contain
3527 * link status information
3530 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3532 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3533 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3536 /* These are source-specific values. */
3538 intel_dp_voltage_max(struct intel_dp *intel_dp)
3540 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3541 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3542 enum port port = encoder->port;
3544 if (HAS_DDI(dev_priv))
3545 return intel_ddi_dp_voltage_max(encoder);
3546 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3547 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3548 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3549 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3550 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3551 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3553 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3557 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3559 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3560 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3561 enum port port = encoder->port;
3563 if (HAS_DDI(dev_priv)) {
3564 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3565 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3566 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3567 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3568 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3569 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3570 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3571 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3572 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3573 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3575 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3577 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3578 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3579 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3580 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3581 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3582 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3583 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3585 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3588 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3589 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3590 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3591 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3592 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3593 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3594 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3595 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3597 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3602 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3604 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3605 unsigned long demph_reg_value, preemph_reg_value,
3606 uniqtranscale_reg_value;
3607 u8 train_set = intel_dp->train_set[0];
3609 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3610 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3611 preemph_reg_value = 0x0004000;
3612 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3613 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3614 demph_reg_value = 0x2B405555;
3615 uniqtranscale_reg_value = 0x552AB83A;
3617 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3618 demph_reg_value = 0x2B404040;
3619 uniqtranscale_reg_value = 0x5548B83A;
3621 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3622 demph_reg_value = 0x2B245555;
3623 uniqtranscale_reg_value = 0x5560B83A;
3625 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3626 demph_reg_value = 0x2B405555;
3627 uniqtranscale_reg_value = 0x5598DA3A;
3633 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3634 preemph_reg_value = 0x0002000;
3635 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3636 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3637 demph_reg_value = 0x2B404040;
3638 uniqtranscale_reg_value = 0x5552B83A;
3640 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3641 demph_reg_value = 0x2B404848;
3642 uniqtranscale_reg_value = 0x5580B83A;
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3645 demph_reg_value = 0x2B404040;
3646 uniqtranscale_reg_value = 0x55ADDA3A;
3652 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3653 preemph_reg_value = 0x0000000;
3654 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3655 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3656 demph_reg_value = 0x2B305555;
3657 uniqtranscale_reg_value = 0x5570B83A;
3659 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3660 demph_reg_value = 0x2B2B4040;
3661 uniqtranscale_reg_value = 0x55ADDA3A;
3667 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3668 preemph_reg_value = 0x0006000;
3669 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3670 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3671 demph_reg_value = 0x1B405555;
3672 uniqtranscale_reg_value = 0x55ADDA3A;
3682 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3683 uniqtranscale_reg_value, 0);
3688 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3690 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3691 u32 deemph_reg_value, margin_reg_value;
3692 bool uniq_trans_scale = false;
3693 u8 train_set = intel_dp->train_set[0];
3695 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3696 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3697 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3698 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3699 deemph_reg_value = 128;
3700 margin_reg_value = 52;
3702 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3703 deemph_reg_value = 128;
3704 margin_reg_value = 77;
3706 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3707 deemph_reg_value = 128;
3708 margin_reg_value = 102;
3710 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3711 deemph_reg_value = 128;
3712 margin_reg_value = 154;
3713 uniq_trans_scale = true;
3719 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3720 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3721 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3722 deemph_reg_value = 85;
3723 margin_reg_value = 78;
3725 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3726 deemph_reg_value = 85;
3727 margin_reg_value = 116;
3729 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3730 deemph_reg_value = 85;
3731 margin_reg_value = 154;
3737 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3738 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3739 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3740 deemph_reg_value = 64;
3741 margin_reg_value = 104;
3743 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3744 deemph_reg_value = 64;
3745 margin_reg_value = 154;
3751 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3752 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3753 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3754 deemph_reg_value = 43;
3755 margin_reg_value = 154;
3765 chv_set_phy_signal_level(encoder, deemph_reg_value,
3766 margin_reg_value, uniq_trans_scale);
3772 g4x_signal_levels(u8 train_set)
3774 u32 signal_levels = 0;
3776 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3777 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3779 signal_levels |= DP_VOLTAGE_0_4;
3781 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3782 signal_levels |= DP_VOLTAGE_0_6;
3784 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3785 signal_levels |= DP_VOLTAGE_0_8;
3787 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3788 signal_levels |= DP_VOLTAGE_1_2;
3791 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3792 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3794 signal_levels |= DP_PRE_EMPHASIS_0;
3796 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3797 signal_levels |= DP_PRE_EMPHASIS_3_5;
3799 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3800 signal_levels |= DP_PRE_EMPHASIS_6;
3802 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3803 signal_levels |= DP_PRE_EMPHASIS_9_5;
3806 return signal_levels;
3809 /* SNB CPU eDP voltage swing and pre-emphasis control */
3811 snb_cpu_edp_signal_levels(u8 train_set)
3813 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3814 DP_TRAIN_PRE_EMPHASIS_MASK);
3815 switch (signal_levels) {
3816 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3817 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3818 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3819 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3820 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3821 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3822 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3823 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3824 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3825 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3826 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3827 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3828 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3829 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3831		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3832			      "0x%x\n", signal_levels);
3833 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3837 /* IVB CPU eDP voltage swing and pre-emphasis control */
3839 ivb_cpu_edp_signal_levels(u8 train_set)
3841 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3842 DP_TRAIN_PRE_EMPHASIS_MASK);
3843 switch (signal_levels) {
3844 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3845 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3846 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3847 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3848 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3849 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3851 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3852 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3853 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3854 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3856 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3857 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3859 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3862		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3863			      "0x%x\n", signal_levels);
3864 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3869 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3871 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3872 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3873 enum port port = intel_dig_port->base.port;
3874 u32 signal_levels, mask = 0;
3875 u8 train_set = intel_dp->train_set[0];
3877 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3878 signal_levels = bxt_signal_levels(intel_dp);
3879 } else if (HAS_DDI(dev_priv)) {
3880 signal_levels = ddi_signal_levels(intel_dp);
3881 mask = DDI_BUF_EMP_MASK;
3882 } else if (IS_CHERRYVIEW(dev_priv)) {
3883 signal_levels = chv_signal_levels(intel_dp);
3884 } else if (IS_VALLEYVIEW(dev_priv)) {
3885 signal_levels = vlv_signal_levels(intel_dp);
3886 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3887 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3888 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3889 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3890 signal_levels = snb_cpu_edp_signal_levels(train_set);
3891 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3893 signal_levels = g4x_signal_levels(train_set);
3894 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3898 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3900 DRM_DEBUG_KMS("Using vswing level %d\n",
3901 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3902 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3903 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3904 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3906 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3908 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3909 POSTING_READ(intel_dp->output_reg);
3913 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3916 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3917 struct drm_i915_private *dev_priv =
3918 to_i915(intel_dig_port->base.base.dev);
3920 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3922 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3923 POSTING_READ(intel_dp->output_reg);
3926 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3928 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3929 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3930 enum port port = intel_dig_port->base.port;
3933 if (!HAS_DDI(dev_priv))
3936 val = I915_READ(DP_TP_CTL(port));
3937 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3938 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3939 I915_WRITE(DP_TP_CTL(port), val);
3942	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3943 * we need to set idle transmission mode is to work around a HW issue
3944 * where we enable the pipe while not in idle link-training mode.
3945	 * In this case there is a requirement to wait for a minimum number of
3946 * idle patterns to be sent.
3951 if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
3952 DP_TP_STATUS_IDLE_DONE,
3953 DP_TP_STATUS_IDLE_DONE,
3955 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3959 intel_dp_link_down(struct intel_encoder *encoder,
3960 const struct intel_crtc_state *old_crtc_state)
3962 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3963 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3964 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3965 enum port port = encoder->port;
3966 u32 DP = intel_dp->DP;
3968 if (WARN_ON(HAS_DDI(dev_priv)))
3971 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3974 DRM_DEBUG_KMS("\n");
3976 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3977 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3978 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3979 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3981 DP &= ~DP_LINK_TRAIN_MASK;
3982 DP |= DP_LINK_TRAIN_PAT_IDLE;
3984 I915_WRITE(intel_dp->output_reg, DP);
3985 POSTING_READ(intel_dp->output_reg);
3987 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3988 I915_WRITE(intel_dp->output_reg, DP);
3989 POSTING_READ(intel_dp->output_reg);
3992	 * HW workaround for IBX: we need to move the port
3993 * to transcoder A after disabling it to allow the
3994 * matching HDMI port to be enabled on transcoder A.
3996 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
3998 * We get CPU/PCH FIFO underruns on the other pipe when
3999 * doing the workaround. Sweep them under the rug.
4001 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4002 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4004 /* always enable with pattern 1 (as per spec) */
4005 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4006 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4007 DP_LINK_TRAIN_PAT_1;
4008 I915_WRITE(intel_dp->output_reg, DP);
4009 POSTING_READ(intel_dp->output_reg);
4012 I915_WRITE(intel_dp->output_reg, DP);
4013 POSTING_READ(intel_dp->output_reg);
4015 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4016 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4017 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4020 msleep(intel_dp->panel_power_down_delay);
4024 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4025 intel_wakeref_t wakeref;
4027 with_pps_lock(intel_dp, wakeref)
4028 intel_dp->active_pipe = INVALID_PIPE;
4033 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4038 * Prior to DP1.3 the bit represented by
4039 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4040	 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
4041 * the true capability of the panel. The only way to check is to
4042 * then compare 0000h and 2200h.
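	/*
	 * For example (hypothetical sink): 0000h might report rev 0x12 while
	 * 2200h reports 0x14; in that case the extended field is the one to
	 * use.
	 */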
4044 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4045 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4048 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4049 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4050		DRM_ERROR("Failed to read DPCD extended capabilities\n");
4054 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4055		DRM_DEBUG_KMS("Extended DPCD rev less than base DPCD rev\n");
4059 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4062 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4063 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4065 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4069 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4071 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4072 sizeof(intel_dp->dpcd)) < 0)
4073 return false; /* aux transfer failed */
4075 intel_dp_extended_receiver_capabilities(intel_dp);
4077 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4079 return intel_dp->dpcd[DP_DPCD_REV] != 0;
4082 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4085 * Clear the cached register set to avoid using stale values
4086 * for the sinks that do not support DSC.
4088 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4090 /* Clear fec_capable to avoid using stale values */
4091 intel_dp->fec_capable = 0;
4093 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4094 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4095 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4096 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4098 sizeof(intel_dp->dsc_dpcd)) < 0)
4099 DRM_ERROR("Failed to read DPCD register 0x%x\n",
4102 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4103 (int)sizeof(intel_dp->dsc_dpcd),
4104 intel_dp->dsc_dpcd);
4106 /* FEC is supported only on DP 1.4 */
4107 if (!intel_dp_is_edp(intel_dp) &&
4108 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4109 &intel_dp->fec_capable) < 0)
4110 DRM_ERROR("Failed to read FEC DPCD register\n");
4112 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4117 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4119 struct drm_i915_private *dev_priv =
4120 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4122 /* this function is meant to be called only once */
4123 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4125 if (!intel_dp_read_dpcd(intel_dp))
4128 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4129 drm_dp_is_branch(intel_dp->dpcd));
4131 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4132 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4133 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4136 * Read the eDP display control registers.
4138 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4139 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4140 * set, but require eDP 1.4+ detection (e.g. for supported link rates
4141 * method). The display control registers should read zero if they're
4142 * not supported anyway.
4144 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4145 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4146 sizeof(intel_dp->edp_dpcd))
4147 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4148 intel_dp->edp_dpcd);
4151 * This has to be called after intel_dp->edp_dpcd is filled, as PSR
4152 * checks for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
4154 intel_psr_init_dpcd(intel_dp);
4156 /* Read the eDP 1.4+ supported link rates. */
4157 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4158 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4161 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4162 sink_rates, sizeof(sink_rates));
4164 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4165 int val = le16_to_cpu(sink_rates[i]);
4170 /* Value read multiplied by 200kHz gives the per-lane
4171 * link rate in kHz. The source rates are, however,
4172 * stored in terms of LS_Clk kHz. The full conversion
4173 * back to symbols is
4174 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4176 intel_dp->sink_rates[i] = (val * 200) / 10;
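/*
 * For example (illustrative): an HBR2-capable panel reports 27000
 * here (27000 * 200 kHz = 5.4 Gbps per lane), which is stored as
 * 27000 * 200 / 10 = 540000, i.e. the 540 MHz link symbol clock.
 */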
4178 intel_dp->num_sink_rates = i;
4182 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4183 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4185 if (intel_dp->num_sink_rates)
4186 intel_dp->use_rate_select = true;
4188 intel_dp_set_sink_rates(intel_dp);
4190 intel_dp_set_common_rates(intel_dp);
4192 /* Read the eDP DSC DPCD registers */
4193 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4194 intel_dp_get_dsc_sink_cap(intel_dp);
4201 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4203 if (!intel_dp_read_dpcd(intel_dp))
4206 /* Don't clobber cached eDP rates. */
4207 if (!intel_dp_is_edp(intel_dp)) {
4208 intel_dp_set_sink_rates(intel_dp);
4209 intel_dp_set_common_rates(intel_dp);
4213 * Some eDP panels do not set a valid value for sink count, which is why
4214 * we don't bother reading it here or in intel_edp_init_dpcd().
4216 if (!intel_dp_is_edp(intel_dp)) {
4220 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4225 * Sink count can change between short pulse HPD interrupts,
4226 * hence a member variable in intel_dp tracks any changes
4227 * between them.
4229 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
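/*
 * SINK_COUNT is not a plain 8-bit field: bits 5:0 hold the low bits and
 * bit 7 the most significant bit, hence the helper macro rather than
 * using the raw register value.
 */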
4232 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4233 * a dongle is present but no display. Unless we need to know
4234 * whether a dongle is present or not, we don't need to update
4235 * the downstream port information. An early return here saves
4236 * time by skipping operations that are not required.
4238 if (!intel_dp->sink_count)
4242 if (!drm_dp_is_branch(intel_dp->dpcd))
4243 return true; /* native DP sink */
4245 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4246 return true; /* no per-port downstream info */
4248 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4249 intel_dp->downstream_ports,
4250 DP_MAX_DOWNSTREAM_PORTS) < 0)
4251 return false; /* downstream port status fetch failed */
4257 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4261 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4264 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4267 return mstm_cap & DP_MST_CAP;
4271 intel_dp_can_mst(struct intel_dp *intel_dp)
4273 return i915_modparams.enable_dp_mst &&
4274 intel_dp->can_mst &&
4275 intel_dp_sink_can_mst(intel_dp);
4279 intel_dp_configure_mst(struct intel_dp *intel_dp)
4281 struct intel_encoder *encoder =
4282 &dp_to_dig_port(intel_dp)->base;
4283 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4285 DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4286 port_name(encoder->port), yesno(intel_dp->can_mst),
4287 yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4289 if (!intel_dp->can_mst)
4292 intel_dp->is_mst = sink_can_mst &&
4293 i915_modparams.enable_dp_mst;
4295 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4300 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4302 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4303 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4307 u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4308 int mode_clock, int mode_hdisplay)
4310 u16 bits_per_pixel, max_bpp_small_joiner_ram;
4314 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
4315 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
4316 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
4317 * for MST -> TimeSlotsPerMTP has to be calculated
4319 bits_per_pixel = (link_clock * lane_count * 8 *
4320 DP_DSC_FEC_OVERHEAD_FACTOR) /
4323 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4324 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4328 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
4329 * check, output bpp from small joiner RAM check)
4331 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
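/*
 * Illustrative numbers: 4 lanes at HBR2 give roughly
 * 4 * 540000 * 8 * 0.976 ~= 16.9 Gb/s of post-FEC link bandwidth,
 * while a 3840 pixel wide mode limits the small joiner check to
 * 61440 / 3840 = 16 bpp.
 */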
4333 /* Error out if the max bpp is less than smallest allowed valid bpp */
4334 if (bits_per_pixel < valid_dsc_bpp[0]) {
4335 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4339 /* Find the nearest match in the array of known BPPs from VESA */
4340 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4341 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4344 bits_per_pixel = valid_dsc_bpp[i];
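/*
 * E.g. a computed value of 11 bpp falls between the valid entries
 * 10 and 12 and is rounded down to 10 here.
 */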
4347 * Compressed BPP is returned in U6.4 format, so multiply by 16;
4348 * for Gen 11 the fractional part is 0.
4350 return bits_per_pixel << 4;
4353 u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4357 u8 min_slice_count, i;
4358 int max_slice_width;
4360 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4361 min_slice_count = DIV_ROUND_UP(mode_clock,
4362 DP_DSC_MAX_ENC_THROUGHPUT_0);
4364 min_slice_count = DIV_ROUND_UP(mode_clock,
4365 DP_DSC_MAX_ENC_THROUGHPUT_1);
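/*
 * E.g. a ~533250 KPixels/s 4k mode is below DP_DSC_PEAK_PIXEL_RATE,
 * so it needs DIV_ROUND_UP(533250, 340000) = 2 slices at minimum.
 */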
4367 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4368 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4369 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4373 /* Also take into account max slice width */
4374 min_slice_count = min_t(u8, min_slice_count,
4375 DIV_ROUND_UP(mode_hdisplay,
4378 /* Find the closest match to the valid slice count values */
4379 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4380 if (valid_dsc_slicecount[i] >
4381 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4384 if (min_slice_count <= valid_dsc_slicecount[i])
4385 return valid_dsc_slicecount[i];
4388 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4392 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4396 u8 test_lane_count, test_link_bw;
4400 /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4401 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4405 DRM_DEBUG_KMS("Lane count read failed\n");
4408 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4410 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4413 DRM_DEBUG_KMS("Link Rate read failed\n");
4416 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4418 /* Validate the requested link rate and lane count */
4419 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4423 intel_dp->compliance.test_lane_count = test_lane_count;
4424 intel_dp->compliance.test_link_rate = test_link_rate;
4429 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4433 __be16 h_width, v_height;
4436 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4437 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4440 DRM_DEBUG_KMS("Test pattern read failed\n");
4443 if (test_pattern != DP_COLOR_RAMP)
4446 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4449 DRM_DEBUG_KMS("H Width read failed\n");
4453 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4456 DRM_DEBUG_KMS("V Height read failed\n");
4460 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4463 DRM_DEBUG_KMS("TEST MISC read failed\n");
4466 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4468 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4470 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4471 case DP_TEST_BIT_DEPTH_6:
4472 intel_dp->compliance.test_data.bpc = 6;
4474 case DP_TEST_BIT_DEPTH_8:
4475 intel_dp->compliance.test_data.bpc = 8;
4481 intel_dp->compliance.test_data.video_pattern = test_pattern;
4482 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4483 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4484 /* Set test active flag here so userspace doesn't interrupt things */
4485 intel_dp->compliance.test_active = 1;
4490 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4492 u8 test_result = DP_TEST_ACK;
4493 struct intel_connector *intel_connector = intel_dp->attached_connector;
4494 struct drm_connector *connector = &intel_connector->base;
4496 if (intel_connector->detect_edid == NULL ||
4497 connector->edid_corrupt ||
4498 intel_dp->aux.i2c_defer_count > 6) {
4499 /* Check EDID read for NACKs, DEFERs and corruption
4500 * (DP CTS 1.2 Core r1.1)
4501 * 4.2.2.4 : Failed EDID read, I2C_NAK
4502 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4503 * 4.2.2.6 : EDID corruption detected
4504 * Use failsafe mode for all cases
4506 if (intel_dp->aux.i2c_nack_count > 0 ||
4507 intel_dp->aux.i2c_defer_count > 0)
4508 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4509 intel_dp->aux.i2c_nack_count,
4510 intel_dp->aux.i2c_defer_count);
4511 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4513 struct edid *block = intel_connector->detect_edid;
4515 /* We have to write the checksum
4516 * of the last block read
4518 block += intel_connector->detect_edid->extensions;
4520 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4521 block->checksum) <= 0)
4522 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4524 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4525 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4528 /* Set test active flag here so userspace doesn't interrupt things */
4529 intel_dp->compliance.test_active = 1;
4534 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4536 u8 test_result = DP_TEST_NAK;
4540 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4542 u8 response = DP_TEST_NAK;
4546 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4548 DRM_DEBUG_KMS("Could not read test request from sink\n");
4553 case DP_TEST_LINK_TRAINING:
4554 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4555 response = intel_dp_autotest_link_training(intel_dp);
4557 case DP_TEST_LINK_VIDEO_PATTERN:
4558 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4559 response = intel_dp_autotest_video_pattern(intel_dp);
4561 case DP_TEST_LINK_EDID_READ:
4562 DRM_DEBUG_KMS("EDID test requested\n");
4563 response = intel_dp_autotest_edid(intel_dp);
4565 case DP_TEST_LINK_PHY_TEST_PATTERN:
4566 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4567 response = intel_dp_autotest_phy_pattern(intel_dp);
4570 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4574 if (response & DP_TEST_ACK)
4575 intel_dp->compliance.test_type = request;
4578 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4580 DRM_DEBUG_KMS("Could not write test response to sink\n");
4584 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4588 if (intel_dp->is_mst) {
4589 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4594 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4595 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4599 /* check link status - esi[10] = 0x200c */
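/*
 * The ESI block is read starting at DP_SINK_COUNT_ESI (0x2002),
 * so esi[10] corresponds to DPCD 0x200c, the first of the ESI
 * lane status registers.
 */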
4600 if (intel_dp->active_mst_links > 0 &&
4601 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4602 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4603 intel_dp_start_link_train(intel_dp);
4604 intel_dp_stop_link_train(intel_dp);
4607 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4608 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4611 for (retry = 0; retry < 3; retry++) {
4613 wret = drm_dp_dpcd_write(&intel_dp->aux,
4614 DP_SINK_COUNT_ESI+1,
4621 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4623 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4631 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4632 intel_dp->is_mst = false;
4633 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4641 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4643 u8 link_status[DP_LINK_STATUS_SIZE];
4645 if (!intel_dp->link_trained)
4649 * While the PSR source HW is enabled it controls the main link,
4650 * enabling and disabling frame transmission, so trying to retrain
4651 * will fail: the link may not be on, or training patterns could get
4652 * mixed with frame data, causing the retrain to fail.
4653 * Also, when exiting PSR the HW retrains the link anyway, fixing
4654 * any link status error.
4656 if (intel_psr_enabled(intel_dp))
4659 if (!intel_dp_get_link_status(intel_dp, link_status))
4663 * Validate the cached values of intel_dp->link_rate and
4664 * intel_dp->lane_count before attempting to retrain.
4666 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4667 intel_dp->lane_count))
4670 /* Retrain if Channel EQ or CR not ok */
4671 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4674 int intel_dp_retrain_link(struct intel_encoder *encoder,
4675 struct drm_modeset_acquire_ctx *ctx)
4677 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4678 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4679 struct intel_connector *connector = intel_dp->attached_connector;
4680 struct drm_connector_state *conn_state;
4681 struct intel_crtc_state *crtc_state;
4682 struct intel_crtc *crtc;
4685 /* FIXME handle the MST connectors as well */
4687 if (!connector || connector->base.status != connector_status_connected)
4690 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4695 conn_state = connector->base.state;
4697 crtc = to_intel_crtc(conn_state->crtc);
4701 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4705 crtc_state = to_intel_crtc_state(crtc->base.state);
4707 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4709 if (!crtc_state->base.active)
4712 if (conn_state->commit &&
4713 !try_wait_for_completion(&conn_state->commit->hw_done))
4716 if (!intel_dp_needs_link_retrain(intel_dp))
4719 /* Suppress underruns caused by re-training */
4720 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4721 if (crtc_state->has_pch_encoder)
4722 intel_set_pch_fifo_underrun_reporting(dev_priv,
4723 intel_crtc_pch_transcoder(crtc), false);
4725 intel_dp_start_link_train(intel_dp);
4726 intel_dp_stop_link_train(intel_dp);
4728 /* Keep underrun reporting disabled until things are stable */
4729 intel_wait_for_vblank(dev_priv, crtc->pipe);
4731 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4732 if (crtc_state->has_pch_encoder)
4733 intel_set_pch_fifo_underrun_reporting(dev_priv,
4734 intel_crtc_pch_transcoder(crtc), true);
4740 * If the display is now connected, check the link status;
4741 * there have been known issues of link loss triggering
4744 * Some sinks (eg. ASUS PB287Q) seem to perform some
4745 * weird HPD ping pong during modesets. So we can apparently
4746 * end up with HPD going low during a modeset, and then
4747 * going back up soon after. And once that happens we must
4748 * retrain the link to get a picture. That's in case no
4749 * userspace component reacted to intermittent HPD dip.
4751 static bool intel_dp_hotplug(struct intel_encoder *encoder,
4752 struct intel_connector *connector)
4754 struct drm_modeset_acquire_ctx ctx;
4758 changed = intel_encoder_hotplug(encoder, connector);
4760 drm_modeset_acquire_init(&ctx, 0);
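/*
 * Standard modeset-lock retry dance: on -EDEADLK back off and retry
 * the retrain, then drop the locks and release the acquire context.
 */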
4763 ret = intel_dp_retrain_link(encoder, &ctx);
4765 if (ret == -EDEADLK) {
4766 drm_modeset_backoff(&ctx);
4773 drm_modeset_drop_locks(&ctx);
4774 drm_modeset_acquire_fini(&ctx);
4775 WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4780 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4784 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4787 if (drm_dp_dpcd_readb(&intel_dp->aux,
4788 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4791 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4793 if (val & DP_AUTOMATED_TEST_REQUEST)
4794 intel_dp_handle_test_request(intel_dp);
4796 if (val & DP_CP_IRQ)
4797 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4799 if (val & DP_SINK_SPECIFIC_IRQ)
4800 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4804 * According to DP spec
4807 * 2. Configure link according to Receiver Capabilities
4808 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4809 * 4. Check link status on receipt of hot-plug interrupt
4811 * intel_dp_short_pulse - handles short pulse interrupts
4812 * when full detection is not required.
4813 * Returns %true if the short pulse is handled and full detection
4814 * is NOT required, %false otherwise.
4817 intel_dp_short_pulse(struct intel_dp *intel_dp)
4819 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4820 u8 old_sink_count = intel_dp->sink_count;
4824 * Clearing compliance test variables to allow capturing
4825 * of values for next automated test request.
4827 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4830 * Now read the DPCD to see if it's actually running.
4831 * If the current value of sink count doesn't match the
4832 * value that was stored earlier, or the DPCD read failed,
4833 * we need to do full detection.
4835 ret = intel_dp_get_dpcd(intel_dp);
4837 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4838 /* No need to proceed if we are going to do full detect */
4842 intel_dp_check_service_irq(intel_dp);
4844 /* Handle CEC interrupts, if any */
4845 drm_dp_cec_irq(&intel_dp->aux);
4847 /* defer to the hotplug work for link retraining if needed */
4848 if (intel_dp_needs_link_retrain(intel_dp))
4851 intel_psr_short_pulse(intel_dp);
4853 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4854 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4855 /* Send a Hotplug Uevent to userspace to start modeset */
4856 drm_kms_helper_hotplug_event(&dev_priv->drm);
4862 /* XXX this is probably wrong for multiple downstream ports */
4863 static enum drm_connector_status
4864 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4866 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4867 u8 *dpcd = intel_dp->dpcd;
4871 lspcon_resume(lspcon);
4873 if (!intel_dp_get_dpcd(intel_dp))
4874 return connector_status_disconnected;
4876 if (intel_dp_is_edp(intel_dp))
4877 return connector_status_connected;
4879 /* if there's no downstream port, we're done */
4880 if (!drm_dp_is_branch(dpcd))
4881 return connector_status_connected;
4883 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4884 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4885 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4887 return intel_dp->sink_count ?
4888 connector_status_connected : connector_status_disconnected;
4891 if (intel_dp_can_mst(intel_dp))
4892 return connector_status_connected;
4894 /* If no HPD, poke DDC gently */
4895 if (drm_probe_ddc(&intel_dp->aux.ddc))
4896 return connector_status_connected;
4898 /* Well we tried, say unknown for unreliable port types */
4899 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4900 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4901 if (type == DP_DS_PORT_TYPE_VGA ||
4902 type == DP_DS_PORT_TYPE_NON_EDID)
4903 return connector_status_unknown;
4905 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4906 DP_DWN_STRM_PORT_TYPE_MASK;
4907 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4908 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4909 return connector_status_unknown;
4912 /* Anything else is out of spec, warn and ignore */
4913 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4914 return connector_status_disconnected;
4917 static enum drm_connector_status
4918 edp_detect(struct intel_dp *intel_dp)
4920 return connector_status_connected;
4923 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
4925 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4928 switch (encoder->hpd_pin) {
4930 bit = SDE_PORTB_HOTPLUG;
4933 bit = SDE_PORTC_HOTPLUG;
4936 bit = SDE_PORTD_HOTPLUG;
4939 MISSING_CASE(encoder->hpd_pin);
4943 return I915_READ(SDEISR) & bit;
4946 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
4948 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4951 switch (encoder->hpd_pin) {
4953 bit = SDE_PORTB_HOTPLUG_CPT;
4956 bit = SDE_PORTC_HOTPLUG_CPT;
4959 bit = SDE_PORTD_HOTPLUG_CPT;
4962 MISSING_CASE(encoder->hpd_pin);
4966 return I915_READ(SDEISR) & bit;
4969 static bool spt_digital_port_connected(struct intel_encoder *encoder)
4971 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4974 switch (encoder->hpd_pin) {
4976 bit = SDE_PORTA_HOTPLUG_SPT;
4979 bit = SDE_PORTE_HOTPLUG_SPT;
4982 return cpt_digital_port_connected(encoder);
4985 return I915_READ(SDEISR) & bit;
4988 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
4990 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4993 switch (encoder->hpd_pin) {
4995 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4998 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5001 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5004 MISSING_CASE(encoder->hpd_pin);
5008 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5011 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5013 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5016 switch (encoder->hpd_pin) {
5018 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5021 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5024 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5027 MISSING_CASE(encoder->hpd_pin);
5031 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5034 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5036 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5038 if (encoder->hpd_pin == HPD_PORT_A)
5039 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5041 return ibx_digital_port_connected(encoder);
5044 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5046 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5048 if (encoder->hpd_pin == HPD_PORT_A)
5049 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5051 return cpt_digital_port_connected(encoder);
5054 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5056 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5058 if (encoder->hpd_pin == HPD_PORT_A)
5059 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5061 return cpt_digital_port_connected(encoder);
5064 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5066 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5068 if (encoder->hpd_pin == HPD_PORT_A)
5069 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5071 return cpt_digital_port_connected(encoder);
5074 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5076 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5079 switch (encoder->hpd_pin) {
5081 bit = BXT_DE_PORT_HP_DDIA;
5084 bit = BXT_DE_PORT_HP_DDIB;
5087 bit = BXT_DE_PORT_HP_DDIC;
5090 MISSING_CASE(encoder->hpd_pin);
5094 return I915_READ(GEN8_DE_PORT_ISR) & bit;
5097 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5098 struct intel_digital_port *intel_dig_port)
5100 enum port port = intel_dig_port->base.port;
5102 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5105 static const char *tc_type_name(enum tc_port_type type)
5107 static const char * const names[] = {
5108 [TC_PORT_UNKNOWN] = "unknown",
5109 [TC_PORT_LEGACY] = "legacy",
5110 [TC_PORT_TYPEC] = "typec",
5111 [TC_PORT_TBT] = "tbt",
5114 if (WARN_ON(type >= ARRAY_SIZE(names)))
5115 type = TC_PORT_UNKNOWN;
5120 static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
5121 struct intel_digital_port *intel_dig_port,
5122 bool is_legacy, bool is_typec, bool is_tbt)
5124 enum port port = intel_dig_port->base.port;
5125 enum tc_port_type old_type = intel_dig_port->tc_type;
5127 WARN_ON(is_legacy + is_typec + is_tbt != 1);
5130 intel_dig_port->tc_type = TC_PORT_LEGACY;
5132 intel_dig_port->tc_type = TC_PORT_TYPEC;
5134 intel_dig_port->tc_type = TC_PORT_TBT;
5138 /* Types are not supposed to be changed at runtime. */
5139 WARN_ON(old_type != TC_PORT_UNKNOWN &&
5140 old_type != intel_dig_port->tc_type);
5142 if (old_type != intel_dig_port->tc_type)
5143 DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
5144 tc_type_name(intel_dig_port->tc_type));
5148 * This function implements the first part of the Connect Flow described by our
5149 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
5150 * lanes, EDID, etc) is done as needed in the typical places.
5152 * Unlike the other ports, type-C ports are not available to use as soon as we
5153 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
5154 * display, USB, etc. As a result, handshaking through FIA is required around
5155 * connect and disconnect to cleanly transfer ownership with the controller and
5156 * set the type-C power state.
5158 * We could opt to only do the connect flow when we actually try to use the AUX
5159 * channels or do a modeset, then immediately run the disconnect flow after
5160 * usage, but there are some implications on this for a dynamic environment:
5161 * things may go away or change behind our backs. So for now our driver is
5162 * always trying to acquire ownership of the controller as soon as it gets an
5163 * interrupt (or polls state and sees a port is connected) and only gives it
5164 * back when it sees a disconnect. Implementation of a more fine-grained model
5165 * will require a lot of coordination with user space and thorough testing for
5166 * the extra possible cases.
5168 static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
5169 struct intel_digital_port *dig_port)
5171 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5174 if (dig_port->tc_type != TC_PORT_LEGACY &&
5175 dig_port->tc_type != TC_PORT_TYPEC)
5178 val = I915_READ(PORT_TX_DFLEXDPPMS);
5179 if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
5180 DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
5181 WARN_ON(dig_port->tc_legacy_port);
5186 * This function may be called many times in a row without an HPD event
5187 * in between, so try to avoid the write when we can.
5189 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5190 if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
5191 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5192 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5196 * Now we have to re-check the live state, in case the port recently
5197 * became disconnected. Not necessary for legacy mode.
5199 if (dig_port->tc_type == TC_PORT_TYPEC &&
5200 !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
5201 DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
5202 icl_tc_phy_disconnect(dev_priv, dig_port);
5210 * See the comment at the connect function. This implements the Disconnect Flow.
5213 void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5214 struct intel_digital_port *dig_port)
5216 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5218 if (dig_port->tc_type == TC_PORT_UNKNOWN)
5222 * The TBT disconnection flow is just reading the live status, which was already done by the caller.
5225 if (dig_port->tc_type == TC_PORT_TYPEC ||
5226 dig_port->tc_type == TC_PORT_LEGACY) {
5229 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5230 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5231 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5234 DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5235 port_name(dig_port->base.port),
5236 tc_type_name(dig_port->tc_type));
5238 dig_port->tc_type = TC_PORT_UNKNOWN;
5242 * The type-C ports are different because even when they are connected, they may
5243 * not be available/usable by the graphics driver: see the comment on
5244 * icl_tc_phy_connect(). So in our driver instead of adding the additional
5245 * concept of "usable" and making everything check for "connected and usable", we
5246 * define a port as "connected" when it is not only connected, but also when it
5247 * is usable by the rest of the driver. That maintains the old assumption that
5248 * connected ports are usable, and avoids exposing to the users objects they
5251 static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5252 struct intel_digital_port *intel_dig_port)
5254 enum port port = intel_dig_port->base.port;
5255 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
5256 bool is_legacy, is_typec, is_tbt;
5260 * WARN if we got a legacy port HPD, but VBT didn't mark the port as
5261 * legacy. Treat the port as legacy from now on.
5263 if (WARN_ON(!intel_dig_port->tc_legacy_port &&
5264 I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
5265 intel_dig_port->tc_legacy_port = true;
5266 is_legacy = intel_dig_port->tc_legacy_port;
5269 * The spec says we shouldn't be using the ISR bits for detecting
5270 * between TC and TBT. We should use DFLEXDPSP.
5272 dpsp = I915_READ(PORT_TX_DFLEXDPSP);
5273 is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
5274 is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
5276 if (!is_legacy && !is_typec && !is_tbt) {
5277 icl_tc_phy_disconnect(dev_priv, intel_dig_port);
5282 icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
5285 if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
5291 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5293 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5294 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5296 if (intel_port_is_combophy(dev_priv, encoder->port))
5297 return icl_combo_port_connected(dev_priv, dig_port);
5298 else if (intel_port_is_tc(dev_priv, encoder->port))
5299 return icl_tc_port_connected(dev_priv, dig_port);
5301 MISSING_CASE(encoder->hpd_pin);
5307 * intel_digital_port_connected - is the specified port connected?
5308 * @encoder: intel_encoder
5310 * In cases where there's a connector physically connected but it can't be used
5311 * by our hardware we also return false, since the rest of the driver should
5312 * pretty much treat the port as disconnected. This is relevant for type-C
5313 * (starting on ICL) where there's ownership involved.
5315 * Return %true if port is connected, %false otherwise.
5317 bool intel_digital_port_connected(struct intel_encoder *encoder)
5319 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5321 if (HAS_GMCH(dev_priv)) {
5322 if (IS_GM45(dev_priv))
5323 return gm45_digital_port_connected(encoder);
5325 return g4x_digital_port_connected(encoder);
5328 if (INTEL_GEN(dev_priv) >= 11)
5329 return icl_digital_port_connected(encoder);
5330 else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5331 return spt_digital_port_connected(encoder);
5332 else if (IS_GEN9_LP(dev_priv))
5333 return bxt_digital_port_connected(encoder);
5334 else if (IS_GEN(dev_priv, 8))
5335 return bdw_digital_port_connected(encoder);
5336 else if (IS_GEN(dev_priv, 7))
5337 return ivb_digital_port_connected(encoder);
5338 else if (IS_GEN(dev_priv, 6))
5339 return snb_digital_port_connected(encoder);
5340 else if (IS_GEN(dev_priv, 5))
5341 return ilk_digital_port_connected(encoder);
5343 MISSING_CASE(INTEL_GEN(dev_priv));
5347 static struct edid *
5348 intel_dp_get_edid(struct intel_dp *intel_dp)
5350 struct intel_connector *intel_connector = intel_dp->attached_connector;
5352 /* use cached edid if we have one */
5353 if (intel_connector->edid) {
5355 if (IS_ERR(intel_connector->edid))
5358 return drm_edid_duplicate(intel_connector->edid);
5360 return drm_get_edid(&intel_connector->base,
5361 &intel_dp->aux.ddc);
5365 intel_dp_set_edid(struct intel_dp *intel_dp)
5367 struct intel_connector *intel_connector = intel_dp->attached_connector;
5370 intel_dp_unset_edid(intel_dp);
5371 edid = intel_dp_get_edid(intel_dp);
5372 intel_connector->detect_edid = edid;
5374 intel_dp->has_audio = drm_detect_monitor_audio(edid);
5375 drm_dp_cec_set_edid(&intel_dp->aux, edid);
5379 intel_dp_unset_edid(struct intel_dp *intel_dp)
5381 struct intel_connector *intel_connector = intel_dp->attached_connector;
5383 drm_dp_cec_unset_edid(&intel_dp->aux);
5384 kfree(intel_connector->detect_edid);
5385 intel_connector->detect_edid = NULL;
5387 intel_dp->has_audio = false;
5391 intel_dp_detect(struct drm_connector *connector,
5392 struct drm_modeset_acquire_ctx *ctx,
5395 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5396 struct intel_dp *intel_dp = intel_attached_dp(connector);
5397 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5398 struct intel_encoder *encoder = &dig_port->base;
5399 enum drm_connector_status status;
5400 enum intel_display_power_domain aux_domain =
5401 intel_aux_power_domain(dig_port);
5402 intel_wakeref_t wakeref;
5404 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5405 connector->base.id, connector->name);
5406 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5408 wakeref = intel_display_power_get(dev_priv, aux_domain);
5410 /* Can't disconnect eDP */
5411 if (intel_dp_is_edp(intel_dp))
5412 status = edp_detect(intel_dp);
5413 else if (intel_digital_port_connected(encoder))
5414 status = intel_dp_detect_dpcd(intel_dp);
5416 status = connector_status_disconnected;
5418 if (status == connector_status_disconnected) {
5419 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5420 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5422 if (intel_dp->is_mst) {
5423 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5425 intel_dp->mst_mgr.mst_state);
5426 intel_dp->is_mst = false;
5427 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5434 if (intel_dp->reset_link_params) {
5435 /* Initial max link lane count */
5436 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5438 /* Initial max link rate */
5439 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5441 intel_dp->reset_link_params = false;
5444 intel_dp_print_rates(intel_dp);
5446 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5447 if (INTEL_GEN(dev_priv) >= 11)
5448 intel_dp_get_dsc_sink_cap(intel_dp);
5450 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5451 drm_dp_is_branch(intel_dp->dpcd));
5453 intel_dp_configure_mst(intel_dp);
5455 if (intel_dp->is_mst) {
5457 * If we are in MST mode then this connector
5458 * won't appear connected or have anything
5461 status = connector_status_disconnected;
5466 * Some external monitors do not signal loss of link synchronization
5467 * with an IRQ_HPD, so force a link status check.
5469 if (!intel_dp_is_edp(intel_dp)) {
5472 ret = intel_dp_retrain_link(encoder, ctx);
5474 intel_display_power_put(dev_priv, aux_domain, wakeref);
5480 * Clear the NACK and defer counts so we get their exact values
5481 * while reading the EDID, as required by Compliance tests
5482 * 4.2.2.4 and 4.2.2.5.
5484 intel_dp->aux.i2c_nack_count = 0;
5485 intel_dp->aux.i2c_defer_count = 0;
5487 intel_dp_set_edid(intel_dp);
5488 if (intel_dp_is_edp(intel_dp) ||
5489 to_intel_connector(connector)->detect_edid)
5490 status = connector_status_connected;
5492 intel_dp_check_service_irq(intel_dp);
5495 if (status != connector_status_connected && !intel_dp->is_mst)
5496 intel_dp_unset_edid(intel_dp);
5498 intel_display_power_put(dev_priv, aux_domain, wakeref);
5503 intel_dp_force(struct drm_connector *connector)
5505 struct intel_dp *intel_dp = intel_attached_dp(connector);
5506 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5507 struct intel_encoder *intel_encoder = &dig_port->base;
5508 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5509 enum intel_display_power_domain aux_domain =
5510 intel_aux_power_domain(dig_port);
5511 intel_wakeref_t wakeref;
5513 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5514 connector->base.id, connector->name);
5515 intel_dp_unset_edid(intel_dp);
5517 if (connector->status != connector_status_connected)
5520 wakeref = intel_display_power_get(dev_priv, aux_domain);
5522 intel_dp_set_edid(intel_dp);
5524 intel_display_power_put(dev_priv, aux_domain, wakeref);
5527 static int intel_dp_get_modes(struct drm_connector *connector)
5529 struct intel_connector *intel_connector = to_intel_connector(connector);
5532 edid = intel_connector->detect_edid;
5534 int ret = intel_connector_update_modes(connector, edid);
5539 /* if eDP has no EDID, fall back to fixed mode */
5540 if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5541 intel_connector->panel.fixed_mode) {
5542 struct drm_display_mode *mode;
5544 mode = drm_mode_duplicate(connector->dev,
5545 intel_connector->panel.fixed_mode);
5547 drm_mode_probed_add(connector, mode);
5556 intel_dp_connector_register(struct drm_connector *connector)
5558 struct intel_dp *intel_dp = intel_attached_dp(connector);
5559 struct drm_device *dev = connector->dev;
5562 ret = intel_connector_register(connector);
5566 i915_debugfs_connector_add(connector);
5568 DRM_DEBUG_KMS("registering %s bus for %s\n",
5569 intel_dp->aux.name, connector->kdev->kobj.name);
5571 intel_dp->aux.dev = connector->kdev;
5572 ret = drm_dp_aux_register(&intel_dp->aux);
5574 drm_dp_cec_register_connector(&intel_dp->aux,
5575 connector->name, dev->dev);
5580 intel_dp_connector_unregister(struct drm_connector *connector)
5582 struct intel_dp *intel_dp = intel_attached_dp(connector);
5584 drm_dp_cec_unregister_connector(&intel_dp->aux);
5585 drm_dp_aux_unregister(&intel_dp->aux);
5586 intel_connector_unregister(connector);
5589 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5591 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5592 struct intel_dp *intel_dp = &intel_dig_port->dp;
5594 intel_dp_mst_encoder_cleanup(intel_dig_port);
5595 if (intel_dp_is_edp(intel_dp)) {
5596 intel_wakeref_t wakeref;
5598 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5600 * vdd might still be enabled due to the delayed vdd off.
5601 * Make sure vdd is actually turned off here.
5603 with_pps_lock(intel_dp, wakeref)
5604 edp_panel_vdd_off_sync(intel_dp);
5606 if (intel_dp->edp_notifier.notifier_call) {
5607 unregister_reboot_notifier(&intel_dp->edp_notifier);
5608 intel_dp->edp_notifier.notifier_call = NULL;
5612 intel_dp_aux_fini(intel_dp);
5615 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5617 intel_dp_encoder_flush_work(encoder);
5619 drm_encoder_cleanup(encoder);
5620 kfree(enc_to_dig_port(encoder));
5623 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5625 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5626 intel_wakeref_t wakeref;
5628 if (!intel_dp_is_edp(intel_dp))
5632 * vdd might still be enabled due to the delayed vdd off.
5633 * Make sure vdd is actually turned off here.
5635 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5636 with_pps_lock(intel_dp, wakeref)
5637 edp_panel_vdd_off_sync(intel_dp);
5640 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5644 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5645 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5646 msecs_to_jiffies(timeout));
5649 DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
5653 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5656 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5657 static const struct drm_dp_aux_msg msg = {
5658 .request = DP_AUX_NATIVE_WRITE,
5659 .address = DP_AUX_HDCP_AKSV,
5660 .size = DRM_HDCP_KSV_LEN,
5662 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5666 /* Output An first, that's easy */
5667 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5668 an, DRM_HDCP_AN_LEN);
5669 if (dpcd_ret != DRM_HDCP_AN_LEN) {
5670 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5672 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5676 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5677 * order to get it on the wire, we need to create the AUX header as if
5678 * we were writing the data, and then tickle the hardware to output the
5679 * data once the header is sent out.
5681 intel_dp_aux_header(txbuf, &msg);
5683 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5684 rxbuf, sizeof(rxbuf),
5685 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5687 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5689 } else if (ret == 0) {
5690 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5694 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
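/* The AUX reply code lives in the upper nibble of the first returned header byte. */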
5695 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5696 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5703 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5707 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5709 if (ret != DRM_HDCP_KSV_LEN) {
5710 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5711 return ret >= 0 ? -EIO : ret;
5716 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5721 * For some reason the HDMI and DP HDCP specs call this register
5722 * definition by different names. In the HDMI spec, it's called BSTATUS,
5723 * but in DP it's called BINFO.
5725 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5726 bstatus, DRM_HDCP_BSTATUS_LEN);
5727 if (ret != DRM_HDCP_BSTATUS_LEN) {
5728 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5729 return ret >= 0 ? -EIO : ret;
5735 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5740 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5743 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5744 return ret >= 0 ? -EIO : ret;
5751 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5752 bool *repeater_present)
5757 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5761 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5766 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5770 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5771 ri_prime, DRM_HDCP_RI_LEN);
5772 if (ret != DRM_HDCP_RI_LEN) {
5773 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5774 return ret >= 0 ? -EIO : ret;
5780 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5785 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5788 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5789 return ret >= 0 ? -EIO : ret;
5791 *ksv_ready = bstatus & DP_BSTATUS_READY;
5796 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5797 int num_downstream, u8 *ksv_fifo)
5802 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5803 for (i = 0; i < num_downstream; i += 3) {
5804 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
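/*
 * E.g. with 7 downstream devices this issues reads of 15, 15 and
 * then 5 bytes (each KSV is 5 bytes).
 */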
5805 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5806 DP_AUX_HDCP_KSV_FIFO,
5807 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5810 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5812 return ret >= 0 ? -EIO : ret;
5819 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5824 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5827 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5828 DP_AUX_HDCP_V_PRIME(i), part,
5829 DRM_HDCP_V_PRIME_PART_LEN);
5830 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5831 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5832 return ret >= 0 ? -EIO : ret;
5838 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5841 /* Not used for single stream DisplayPort setups */
5846 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5851 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5854 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5858 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5862 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5868 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5872 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5876 struct hdcp2_dp_errata_stream_type {
5881 static struct hdcp2_dp_msg_data {
5884 bool msg_detectable;
5886 u32 timeout2; /* Added for the non-paired situation */
5887 } hdcp2_msg_data[] = {
5888 {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
5889 {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
5890 false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
5891 {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
5893 {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
5895 {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
5896 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
5897 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
5898 {HDCP_2_2_AKE_SEND_PAIRING_INFO,
5899 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
5900 HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
5901 {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
5902 {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
5903 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
5904 {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
5906 {HDCP_2_2_REP_SEND_RECVID_LIST,
5907 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
5908 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
5909 {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
5911 {HDCP_2_2_REP_STREAM_MANAGE,
5912 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
5914 {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
5915 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
5916 /* local define to shovel this through the write_2_2 interface */
5917 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
5918 {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
5919 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
5924 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5929 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5930 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5931 HDCP_2_2_DP_RXSTATUS_LEN);
5932 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5933 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5934 return ret >= 0 ? -EIO : ret;
5941 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
5942 u8 msg_id, bool *msg_ready)
5948 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
5953 case HDCP_2_2_AKE_SEND_HPRIME:
5954 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
5957 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
5958 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
5961 case HDCP_2_2_REP_SEND_RECVID_LIST:
5962 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
5966 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
5974 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
5975 struct hdcp2_dp_msg_data *hdcp2_msg_data)
5977 struct intel_dp *dp = &intel_dig_port->dp;
5978 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5979 u8 msg_id = hdcp2_msg_data->msg_id;
5981 bool msg_ready = false;
5983 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
5984 timeout = hdcp2_msg_data->timeout2;
5986 timeout = hdcp2_msg_data->timeout;
5989 * There is no way to detect the CERT, LPRIME and STREAM_READY
5990 * availability, so wait for the timeout and then read the msg.
5992 if (!hdcp2_msg_data->msg_detectable) {
5997 * As we want to check msg availability at the timeout, ignore
5998 * the timeout while waiting for the CP_IRQ.
6000 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
6001 ret = hdcp2_detect_msg_availability(intel_dig_port,
6002 msg_id, &msg_ready);
6008 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
6009 hdcp2_msg_data->msg_id, ret, timeout);
6014 static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
6018 for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
6019 if (hdcp2_msg_data[i].msg_id == msg_id)
6020 return &hdcp2_msg_data[i];
6026 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
6027 void *buf, size_t size)
6029 struct intel_dp *dp = &intel_dig_port->dp;
6030 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6031 unsigned int offset;
6033 ssize_t ret, bytes_to_write, len;
6034 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6036 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6037 if (!hdcp2_msg_data)
6040 offset = hdcp2_msg_data->offset;
6042 /* No msg_id in DP HDCP2.2 msgs */
6043 bytes_to_write = size - 1;
6046 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6048 while (bytes_to_write) {
6049 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6050 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
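/*
 * AUX transfers are capped at 16 bytes, so e.g. the 128 byte
 * E_kpub_km payload of AKE_No_Stored_km goes out as 8 chunked
 * writes.
 */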
6052 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6053 offset, (void *)byte, len);
6057 bytes_to_write -= ret;
6066 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6068 u8 rx_info[HDCP_2_2_RXINFO_LEN];
6072 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6073 DP_HDCP_2_2_REG_RXINFO_OFFSET,
6074 (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6075 if (ret != HDCP_2_2_RXINFO_LEN)
6076 return ret >= 0 ? -EIO : ret;
6078 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6079 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6081 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6082 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6084 ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6085 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6086 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
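/*
 * E.g. a repeater reporting 3 downstream devices yields the fixed
 * part of hdcp2_rep_send_receiverid_list plus 3 * 5 bytes of
 * receiver IDs.
 */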
6092 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6093 u8 msg_id, void *buf, size_t size)
6095 unsigned int offset;
6097 ssize_t ret, bytes_to_recv, len;
6098 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6100 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6101 if (!hdcp2_msg_data)
6103 offset = hdcp2_msg_data->offset;
6105 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6109 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6110 ret = get_receiver_id_list_size(intel_dig_port);
6116 bytes_to_recv = size - 1;
6118 /* DP adaptation msgs have no msg_id */
6121 while (bytes_to_recv) {
6122 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6123 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6125 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6128 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6132 bytes_to_recv -= ret;
6143 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6144 bool is_repeater, u8 content_type)
6146 struct hdcp2_dp_errata_stream_type stream_type_msg;
6152 * Errata for DP: as the stream type is used for encryption, the
6153 * receiver must be told the stream type so it can decrypt the content.
6155 * A repeater is told the stream type as part of its
6156 * authentication later on.
6158 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6159 stream_type_msg.stream_type = content_type;
6161 return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6162 sizeof(stream_type_msg));
6166 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6171 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6175 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6176 ret = HDCP_REAUTH_REQUEST;
6177 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6178 ret = HDCP_LINK_INTEGRITY_FAILURE;
6179 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6180 ret = HDCP_TOPOLOGY_CHANGE;
6186 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6193 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6194 DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6195 rx_caps, HDCP_2_2_RXCAPS_LEN);
6196 if (ret != HDCP_2_2_RXCAPS_LEN)
6197 return ret >= 0 ? -EIO : ret;
6199 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6200 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6206 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6207 .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6208 .read_bksv = intel_dp_hdcp_read_bksv,
6209 .read_bstatus = intel_dp_hdcp_read_bstatus,
6210 .repeater_present = intel_dp_hdcp_repeater_present,
6211 .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6212 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6213 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6214 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6215 .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6216 .check_link = intel_dp_hdcp_check_link,
6217 .hdcp_capable = intel_dp_hdcp_capable,
6218 .write_2_2_msg = intel_dp_hdcp2_write_msg,
6219 .read_2_2_msg = intel_dp_hdcp2_read_msg,
6220 .config_stream_type = intel_dp_hdcp2_config_stream_type,
6221 .check_2_2_link = intel_dp_hdcp2_check_link,
6222 .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6223 .protocol = HDCP_PROTOCOL_DP,
6226 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6228 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6229 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6231 lockdep_assert_held(&dev_priv->pps_mutex);
6233 if (!edp_have_panel_vdd(intel_dp))
6237 * The VDD bit needs a power domain reference, so if the bit is
6238 * already enabled when we boot or resume, grab this reference and
6239 * schedule a vdd off, so we don't hold on to the reference
6242 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
6243 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6245 edp_panel_vdd_schedule_off(intel_dp);
6248 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6250 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6251 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6254 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6255 encoder->port, &pipe))
6258 return INVALID_PIPE;
6261 void intel_dp_encoder_reset(struct drm_encoder *encoder)
6263 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
6264 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
6265 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
6266 intel_wakeref_t wakeref;
6268 if (!HAS_DDI(dev_priv))
6269 intel_dp->DP = I915_READ(intel_dp->output_reg);
6272 lspcon_resume(lspcon);
6274 intel_dp->reset_link_params = true;
6276 with_pps_lock(intel_dp, wakeref) {
6277 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6278 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6280 if (intel_dp_is_edp(intel_dp)) {
6282 * Reinit the power sequencer, in case BIOS did
6283 * something nasty with it.
6285 intel_dp_pps_init(intel_dp);
6286 intel_edp_panel_vdd_sanitize(intel_dp);
6291 static const struct drm_connector_funcs intel_dp_connector_funcs = {
6292 .force = intel_dp_force,
6293 .fill_modes = drm_helper_probe_single_connector_modes,
6294 .atomic_get_property = intel_digital_connector_atomic_get_property,
6295 .atomic_set_property = intel_digital_connector_atomic_set_property,
6296 .late_register = intel_dp_connector_register,
6297 .early_unregister = intel_dp_connector_unregister,
6298 .destroy = intel_connector_destroy,
6299 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6300 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
6303 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6304 .detect_ctx = intel_dp_detect,
6305 .get_modes = intel_dp_get_modes,
6306 .mode_valid = intel_dp_mode_valid,
6307 .atomic_check = intel_digital_connector_atomic_check,
6310 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6311 .reset = intel_dp_encoder_reset,
6312 .destroy = intel_dp_encoder_destroy,
6316 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6318 struct intel_dp *intel_dp = &intel_dig_port->dp;
6319 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6320 enum irqreturn ret = IRQ_NONE;
6321 intel_wakeref_t wakeref;
6323 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6325 * vdd off can generate a long pulse on eDP which
6326 * would require vdd on to handle it, and thus we
6327 * would end up in an endless cycle of
6328 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6330 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
6331 port_name(intel_dig_port->base.port));
6335 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
6336 port_name(intel_dig_port->base.port),
6337 long_hpd ? "long" : "short");
6340 intel_dp->reset_link_params = true;
6344 wakeref = intel_display_power_get(dev_priv,
6345 intel_aux_power_domain(intel_dig_port));
6347 if (intel_dp->is_mst) {
6348 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6350 * If we were in MST mode, and device is not
6351 * there, get out of MST mode
6353 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6354 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6355 intel_dp->is_mst = false;
6356 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6362 if (!intel_dp->is_mst) {
6365 handled = intel_dp_short_pulse(intel_dp);
6374 intel_display_power_put(dev_priv,
6375 intel_aux_power_domain(intel_dig_port),
6381 /* check the VBT to see whether the eDP is on another port */
6382 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6385 * eDP not supported on g4x. So bail out early just
6386 * for a bit of extra safety in case the VBT is bonkers.
6388 if (INTEL_GEN(dev_priv) < 5)
6391 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6394 return intel_bios_is_port_edp(dev_priv, port);
6398 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6400 struct drm_i915_private *dev_priv = to_i915(connector->dev);
6401 enum port port = dp_to_dig_port(intel_dp)->base.port;
6403 if (!IS_G4X(dev_priv) && port != PORT_A)
6404 intel_attach_force_audio_property(connector);
6406 intel_attach_broadcast_rgb_property(connector);
6407 if (HAS_GMCH(dev_priv))
6408 drm_connector_attach_max_bpc_property(connector, 6, 10);
6409 else if (INTEL_GEN(dev_priv) >= 5)
6410 drm_connector_attach_max_bpc_property(connector, 6, 12);
6412 if (intel_dp_is_edp(intel_dp)) {
6413 u32 allowed_scalers;
6415 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6416 if (!HAS_GMCH(dev_priv))
6417 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6419 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6421 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6426 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6428 intel_dp->panel_power_off_time = ktime_get_boottime();
6429 intel_dp->last_power_on = jiffies;
6430 intel_dp->last_backlight_off = jiffies;
6434 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6436 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6437 u32 pp_on, pp_off, pp_ctl;
6438 struct pps_registers regs;
6440 intel_pps_get_registers(intel_dp, &regs);
6442 pp_ctl = ironlake_get_pp_control(intel_dp);
6444 /* Ensure PPS is unlocked */
6445 if (!HAS_DDI(dev_priv))
6446 I915_WRITE(regs.pp_ctrl, pp_ctl);
6448 pp_on = I915_READ(regs.pp_on);
6449 pp_off = I915_READ(regs.pp_off);
6451 /* Pull timing values out of registers */
6452 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6453 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6454 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6455 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
6457 if (i915_mmio_reg_valid(regs.pp_div)) {
6460 pp_div = I915_READ(regs.pp_div);
6462 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
6464 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
6469 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6471 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6473 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6477 intel_pps_verify_state(struct intel_dp *intel_dp)
6479 struct edp_power_seq hw;
6480 struct edp_power_seq *sw = &intel_dp->pps_delays;
6482 intel_pps_readout_hw_state(intel_dp, &hw);
6484 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6485 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6486 DRM_ERROR("PPS state mismatch\n");
6487 intel_pps_dump_state("sw", sw);
6488 intel_pps_dump_state("hw", &hw);
6493 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6495 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6496 struct edp_power_seq cur, vbt, spec,
6497 *final = &intel_dp->pps_delays;
6499 lockdep_assert_held(&dev_priv->pps_mutex);
6501 /* already initialized? */
6502 if (final->t11_t12 != 0)
6505 intel_pps_readout_hw_state(intel_dp, &cur);
6507 intel_pps_dump_state("cur", &cur);
6509 vbt = dev_priv->vbt.edp.pps;
6510 /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
6511 * of 500ms appears to be too short. Occasionally the panel
6512 * just fails to power back on. Increasing the delay to 800ms
6513 * seems sufficient to avoid this problem.
6515 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6516 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6517 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
6520 /* T11_T12 delay is special and actually in units of 100ms, but zero
6521 * based in the hw (so we need to add 100 ms). But the sw vbt
6522 * table multiplies it with 1000 to make it in units of 100usec,
6524 vbt.t11_t12 += 100 * 10;
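/*
 * Illustrative arithmetic, using assumed values: a 500ms VBT T12 arrives
 * here as 500 * 10 = 5000 (in the 100usec sw units described above); adding
 * 100 * 10 = 1000 supplies the 100ms implied by the zero-based hw field,
 * for an effective power cycle delay of 6000 * 100usec = 600ms.
 */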
6526 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6527 * our hw here, which are all in 100usec. */
6528 spec.t1_t3 = 210 * 10;
6529 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6530 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6531 spec.t10 = 500 * 10;
6532 /* This one is special and actually in units of 100ms, but zero
6533 * based in the hw (so we need to add 100 ms). But the sw vbt
6534 * table multiplies it with 1000 to make it in units of 100usec,
6536 spec.t11_t12 = (510 + 100) * 10;
6538 intel_pps_dump_state("vbt", &vbt);
6540 /* Use the max of the register settings and vbt. If both are
6541 * unset, fall back to the spec limits. */
6542 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
6544 max(cur.field, vbt.field))
6545 assign_final(t1_t3);
6549 assign_final(t11_t12);
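/*
 * For reference, per the comment above, assign_final(t1_t3) expands roughly to
 *   final->t1_t3 = (max(cur.t1_t3, vbt.t1_t3) == 0 ?
 *                   spec.t1_t3 : max(cur.t1_t3, vbt.t1_t3));
 * i.e. prefer the larger of the hw readout and VBT values, and fall back to
 * the spec limit only when both are zero.
 */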
6552 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
6553 intel_dp->panel_power_up_delay = get_delay(t1_t3);
6554 intel_dp->backlight_on_delay = get_delay(t8);
6555 intel_dp->backlight_off_delay = get_delay(t9);
6556 intel_dp->panel_power_down_delay = get_delay(t10);
6557 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
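/*
 * Illustrative conversion: get_delay() turns the 100usec units used above
 * into milliseconds, e.g. a t1_t3 of 2100 (210ms) becomes
 * DIV_ROUND_UP(2100, 10) == 210.
 */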
6560 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
6561 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
6562 intel_dp->panel_power_cycle_delay);
6564 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
6565 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
6568 * We override the HW backlight delays to 1 because we do manual waits
6569 * on them. For T8, even BSpec recommends doing it. For T9, if we
6570 * don't do this, we'll end up waiting for the backlight off delay
6571 * twice: once when we do the manual sleep, and once when we disable
6572 * the panel and wait for the PP_STATUS bit to become zero.
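 * As a sketch, the override described here amounts to forcing both
 * driver-side delays set above to 1:
 *   intel_dp->backlight_on_delay = 1;
 *   intel_dp->backlight_off_delay = 1;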
6578 * HW has only a 100msec granularity for t11_t12 so round it up
6581 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
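/*
 * Illustrative rounding, with an assumed value: a t11_t12 of 6100 (610ms in
 * 100usec units) rounds up to roundup(6100, 1000) == 7000, i.e. 700ms, to
 * honour the hw's 100ms granularity.
 */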
6585 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
6586 bool force_disable_vdd)
6588 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6589 u32 pp_on, pp_off, port_sel = 0;
6590 int div = dev_priv->rawclk_freq / 1000;
6591 struct pps_registers regs;
6592 enum port port = dp_to_dig_port(intel_dp)->base.port;
6593 const struct edp_power_seq *seq = &intel_dp->pps_delays;
6595 lockdep_assert_held(&dev_priv->pps_mutex);
6597 intel_pps_get_registers(intel_dp, &regs);
6600 * On some VLV machines the BIOS can leave the VDD
6601 * enabled even on power sequencers which aren't
6602 * hooked up to any port. This would mess up the
6603 * power domain tracking the first time we pick
6604 * one of these power sequencers for use since
6605 * edp_panel_vdd_on() would notice that the VDD was
6606 * already on and therefore wouldn't grab the power
6607 * domain reference. Disable VDD first to avoid this.
6608 * This also avoids spuriously turning the VDD on as
6609 * soon as the new power sequencer gets initialized.
6611 if (force_disable_vdd) {
6612 u32 pp = ironlake_get_pp_control(intel_dp);
6614 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
6616 if (pp & EDP_FORCE_VDD)
6617 DRM_DEBUG_KMS("VDD already on, disabling first\n");
6619 pp &= ~EDP_FORCE_VDD;
6621 I915_WRITE(regs.pp_ctrl, pp);
6624 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
6625 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
6626 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
6627 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
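/*
 * Note: the pp_on/pp_off register fields use the same 100usec units as the
 * sw state, so e.g. seq->t1_t3 == 2100 (210ms) is written into
 * PANEL_POWER_UP_DELAY_MASK unchanged; only t11_t12 below needs a conversion
 * to the hw's 100ms granularity.
 */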
6629 /* Haswell doesn't have any port selection bits for the panel
6630 * power sequencer any more. */
6631 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6632 port_sel = PANEL_PORT_SELECT_VLV(port);
6633 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
6636 port_sel = PANEL_PORT_SELECT_DPA;
6639 port_sel = PANEL_PORT_SELECT_DPC;
6642 port_sel = PANEL_PORT_SELECT_DPD;
6652 I915_WRITE(regs.pp_on, pp_on);
6653 I915_WRITE(regs.pp_off, pp_off);
6656 * Compute the divisor for the pp clock, simply match the Bspec formula.
6658 if (i915_mmio_reg_valid(regs.pp_div)) {
6659 I915_WRITE(regs.pp_div,
6660 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
6661 REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
6665 pp_ctl = I915_READ(regs.pp_ctrl);
6666 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
6667 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
6668 I915_WRITE(regs.pp_ctrl, pp_ctl);
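/*
 * Illustrative arithmetic for the divisor above, assuming rawclk_freq is in
 * kHz and equals 24000 (a 24 MHz raw clock): div == 24, so the reference
 * divider field gets (100 * 24) / 2 - 1 == 1199, while the power cycle delay
 * field gets DIV_ROUND_UP(seq->t11_t12, 1000), i.e. the delay in 100ms units.
 */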
6671 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6672 I915_READ(regs.pp_on),
6673 I915_READ(regs.pp_off),
6674 i915_mmio_reg_valid(regs.pp_div) ?
6675 I915_READ(regs.pp_div) :
6676 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
6679 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6681 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6683 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6684 vlv_initial_power_sequencer_setup(intel_dp);
6686 intel_dp_init_panel_power_sequencer(intel_dp);
6687 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6692 * intel_dp_set_drrs_state - program registers for RR switch to take effect
6693 * @dev_priv: i915 device
6694 * @crtc_state: a pointer to the active intel_crtc_state
6695 * @refresh_rate: RR to be programmed
6697 * This function gets called when refresh rate (RR) has to be changed from
6698 * one frequency to another. Switches can be between high and low RR
6699 * supported by the panel or to any other RR based on media playback (in
6700 * this case, RR value needs to be passed from user space).
6702 * The caller of this function needs to take a lock on dev_priv->drrs.
6704 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6705 const struct intel_crtc_state *crtc_state,
6708 struct intel_encoder *encoder;
6709 struct intel_digital_port *dig_port = NULL;
6710 struct intel_dp *intel_dp = dev_priv->drrs.dp;
6711 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6712 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6714 if (refresh_rate <= 0) {
6715 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
6719 if (intel_dp == NULL) {
6720 DRM_DEBUG_KMS("DRRS not supported.\n");
6724 dig_port = dp_to_dig_port(intel_dp);
6725 encoder = &dig_port->base;
6728 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6732 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6733 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6737 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6739 index = DRRS_LOW_RR;
6741 if (index == dev_priv->drrs.refresh_rate_type) {
6743 "DRRS requested for previously set RR...ignoring\n");
6747 if (!crtc_state->base.active) {
6748 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
6752 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6755 intel_dp_set_m_n(crtc_state, M1_N1);
6758 intel_dp_set_m_n(crtc_state, M2_N2);
6762 DRM_ERROR("Unsupported refresh rate type\n");
6764 } else if (INTEL_GEN(dev_priv) > 6) {
6765 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6768 val = I915_READ(reg);
6769 if (index > DRRS_HIGH_RR) {
6770 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6771 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6773 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6775 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6776 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6778 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6780 I915_WRITE(reg, val);
6783 dev_priv->drrs.refresh_rate_type = index;
6785 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
6789 * intel_edp_drrs_enable - init drrs struct if supported
6790 * @intel_dp: DP struct
6791 * @crtc_state: A pointer to the active crtc state.
6793 * Initializes frontbuffer_bits and drrs.dp
6795 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6796 const struct intel_crtc_state *crtc_state)
6798 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6800 if (!crtc_state->has_drrs) {
6801 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
6805 if (dev_priv->psr.enabled) {
6806 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
6810 mutex_lock(&dev_priv->drrs.mutex);
6811 if (dev_priv->drrs.dp) {
6812 DRM_DEBUG_KMS("DRRS already enabled\n");
6816 dev_priv->drrs.busy_frontbuffer_bits = 0;
6818 dev_priv->drrs.dp = intel_dp;
6821 mutex_unlock(&dev_priv->drrs.mutex);
6825 * intel_edp_drrs_disable - Disable DRRS
6826 * @intel_dp: DP struct
6827 * @old_crtc_state: Pointer to old crtc_state.
6830 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6831 const struct intel_crtc_state *old_crtc_state)
6833 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6835 if (!old_crtc_state->has_drrs)
6838 mutex_lock(&dev_priv->drrs.mutex);
6839 if (!dev_priv->drrs.dp) {
6840 mutex_unlock(&dev_priv->drrs.mutex);
6844 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6845 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
6846 intel_dp->attached_connector->panel.fixed_mode->vrefresh);
6848 dev_priv->drrs.dp = NULL;
6849 mutex_unlock(&dev_priv->drrs.mutex);
6851 cancel_delayed_work_sync(&dev_priv->drrs.work);
6854 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6856 struct drm_i915_private *dev_priv =
6857 container_of(work, typeof(*dev_priv), drrs.work.work);
6858 struct intel_dp *intel_dp;
6860 mutex_lock(&dev_priv->drrs.mutex);
6862 intel_dp = dev_priv->drrs.dp;
6868 * The delayed work can race with an invalidate hence we need to
6872 if (dev_priv->drrs.busy_frontbuffer_bits)
6875 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
6876 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6878 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6879 intel_dp->attached_connector->panel.downclock_mode->vrefresh);
6883 mutex_unlock(&dev_priv->drrs.mutex);
6887 * intel_edp_drrs_invalidate - Disable Idleness DRRS
6888 * @dev_priv: i915 device
6889 * @frontbuffer_bits: frontbuffer plane tracking bits
6891 * This function gets called every time rendering on the given planes starts.
6892 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
6894 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6896 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6897 unsigned int frontbuffer_bits)
6899 struct drm_crtc *crtc;
6902 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6905 cancel_delayed_work(&dev_priv->drrs.work);
6907 mutex_lock(&dev_priv->drrs.mutex);
6908 if (!dev_priv->drrs.dp) {
6909 mutex_unlock(&dev_priv->drrs.mutex);
6913 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6914 pipe = to_intel_crtc(crtc)->pipe;
6916 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6917 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6919 /* invalidate means busy screen hence upclock */
6920 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6921 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6922 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6924 mutex_unlock(&dev_priv->drrs.mutex);
6928 * intel_edp_drrs_flush - Restart Idleness DRRS
6929 * @dev_priv: i915 device
6930 * @frontbuffer_bits: frontbuffer plane tracking bits
6932 * This function gets called every time rendering on the given planes has
6933 * completed or a flip on a crtc has completed. So DRRS should be upclocked
6934 * (LOW_RR -> HIGH_RR). Idleness detection should also be started again,
6935 * if no other planes are dirty.
6937 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6939 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6940 unsigned int frontbuffer_bits)
6942 struct drm_crtc *crtc;
6945 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6948 cancel_delayed_work(&dev_priv->drrs.work);
6950 mutex_lock(&dev_priv->drrs.mutex);
6951 if (!dev_priv->drrs.dp) {
6952 mutex_unlock(&dev_priv->drrs.mutex);
6956 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6957 pipe = to_intel_crtc(crtc)->pipe;
6959 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6960 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6962 /* flush means busy screen hence upclock */
6963 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6964 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6965 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6968 * flush also means no more activity hence schedule downclock, if all
6969 * other fbs are quiescent too
6971 if (!dev_priv->drrs.busy_frontbuffer_bits)
6972 schedule_delayed_work(&dev_priv->drrs.work,
6973 msecs_to_jiffies(1000));
6974 mutex_unlock(&dev_priv->drrs.mutex);
6978 * DOC: Display Refresh Rate Switching (DRRS)
6980 * Display Refresh Rate Switching (DRRS) is a power conservation feature
6981 * which enables switching between low and high refresh rates,
6982 * dynamically, based on the usage scenario. This feature is applicable
6983 * for internal panels.
6985 * Indication that the panel supports DRRS is given by the panel EDID, which
6986 * would list multiple refresh rates for one resolution.
6988 * DRRS is of 2 types - static and seamless.
6989 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6990 * (may appear as a blink on screen) and is used in a dock-undock scenario.
6991 * Seamless DRRS involves changing RR without any visual effect to the user
6992 * and can be used during normal system usage. This is done by programming
6993 * certain registers.
6995 * Support for static/seamless DRRS may be indicated in the VBT based on
6996 * inputs from the panel spec.
6998 * DRRS saves power by switching to low RR based on usage scenarios.
7000 * The implementation is based on frontbuffer tracking implementation. When
7001 * there is a disturbance on the screen triggered by user activity or a periodic
7002 * system activity, DRRS is disabled (RR is changed to high RR). When there is
7003 * no movement on screen, after a timeout of 1 second, a switch to low RR is
7006 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7007 * and intel_edp_drrs_flush() are called.
7009 * DRRS can be further extended to support other internal panels and also
7010 * the scenario of video playback wherein RR is set based on the rate
7011 * requested by userspace.
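 *
 * A hedged sketch of the idleness DRRS flow described above, in terms of the
 * entry points defined in this file:
 *
 *   intel_edp_drrs_enable(intel_dp, crtc_state);            [modeset enable]
 *   intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);  [activity: switch to high RR]
 *   intel_edp_drrs_flush(dev_priv, frontbuffer_bits);       [idle: arm the 1s downclock work]
 *   intel_edp_drrs_disable(intel_dp, old_crtc_state);       [modeset disable]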
7015 * intel_dp_drrs_init - Init basic DRRS work and mutex.
7016 * @connector: eDP connector
7017 * @fixed_mode: preferred mode of panel
7019 * This function is called only once at driver load to initialize basic
7023 * Downclock mode if panel supports it, else return NULL.
7024 * DRRS support is determined by the presence of downclock mode (apart
7025 * from VBT setting).
7027 static struct drm_display_mode *
7028 intel_dp_drrs_init(struct intel_connector *connector,
7029 struct drm_display_mode *fixed_mode)
7031 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
7032 struct drm_display_mode *downclock_mode = NULL;
7034 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7035 mutex_init(&dev_priv->drrs.mutex);
7037 if (INTEL_GEN(dev_priv) <= 6) {
7038 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
7042 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
7043 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
7047 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
7048 if (!downclock_mode) {
7049 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
7053 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
7055 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
7056 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
7057 return downclock_mode;
7060 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
7061 struct intel_connector *intel_connector)
7063 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7064 struct drm_device *dev = &dev_priv->drm;
7065 struct drm_connector *connector = &intel_connector->base;
7066 struct drm_display_mode *fixed_mode = NULL;
7067 struct drm_display_mode *downclock_mode = NULL;
7069 enum pipe pipe = INVALID_PIPE;
7070 intel_wakeref_t wakeref;
7073 if (!intel_dp_is_edp(intel_dp))
7076 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
7079 * On IBX/CPT we may get here with LVDS already registered. Since the
7080 * driver uses the only internal power sequencer available for both
7081 * eDP and LVDS, bail out early in this case to prevent interfering
7082 * with an already powered-on LVDS power sequencer.
7084 if (intel_get_lvds_encoder(dev_priv)) {
7085 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
7086 DRM_INFO("LVDS was detected, not registering eDP\n");
7091 with_pps_lock(intel_dp, wakeref) {
7092 intel_dp_init_panel_power_timestamps(intel_dp);
7093 intel_dp_pps_init(intel_dp);
7094 intel_edp_panel_vdd_sanitize(intel_dp);
7097 /* Cache DPCD and EDID for edp. */
7098 has_dpcd = intel_edp_init_dpcd(intel_dp);
7101 /* if this fails, presume the device is a ghost */
7102 DRM_INFO("failed to retrieve link info, disabling eDP\n");
7106 mutex_lock(&dev->mode_config.mutex);
7107 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
7109 if (drm_add_edid_modes(connector, edid)) {
7110 drm_connector_update_edid_property(connector,
7114 edid = ERR_PTR(-EINVAL);
7117 edid = ERR_PTR(-ENOENT);
7119 intel_connector->edid = edid;
7121 fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
7123 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
7125 /* fallback to VBT if available for eDP */
7127 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
7128 mutex_unlock(&dev->mode_config.mutex);
7130 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7131 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
7132 register_reboot_notifier(&intel_dp->edp_notifier);
7135 * Figure out the current pipe for the initial backlight setup.
7136 * If the current pipe isn't valid, try the PPS pipe, and if that
7137 * fails just assume pipe A.
7139 pipe = vlv_active_pipe(intel_dp);
7141 if (pipe != PIPE_A && pipe != PIPE_B)
7142 pipe = intel_dp->pps_pipe;
7144 if (pipe != PIPE_A && pipe != PIPE_B)
7147 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
7151 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
7152 intel_connector->panel.backlight.power = intel_edp_backlight_power;
7153 intel_panel_setup_backlight(connector, pipe);
7156 drm_connector_init_panel_orientation_property(
7157 connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
7162 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
7164 * vdd might still be enabled due to the delayed vdd off.
7165 * Make sure vdd is actually turned off here.
7167 with_pps_lock(intel_dp, wakeref)
7168 edp_panel_vdd_off_sync(intel_dp);
7173 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7175 struct intel_connector *intel_connector;
7176 struct drm_connector *connector;
7178 intel_connector = container_of(work, typeof(*intel_connector),
7179 modeset_retry_work);
7180 connector = &intel_connector->base;
7181 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7184 /* Grab the locks before changing the connector property */
7185 mutex_lock(&connector->dev->mode_config.mutex);
7186 /* Set connector link status to BAD and send a Uevent to notify
7187 * userspace to do a modeset.
7189 drm_connector_set_link_status_property(connector,
7190 DRM_MODE_LINK_STATUS_BAD);
7191 mutex_unlock(&connector->dev->mode_config.mutex);
7192 /* Send Hotplug uevent so userspace can reprobe */
7193 drm_kms_helper_hotplug_event(connector->dev);
7197 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
7198 struct intel_connector *intel_connector)
7200 struct drm_connector *connector = &intel_connector->base;
7201 struct intel_dp *intel_dp = &intel_dig_port->dp;
7202 struct intel_encoder *intel_encoder = &intel_dig_port->base;
7203 struct drm_device *dev = intel_encoder->base.dev;
7204 struct drm_i915_private *dev_priv = to_i915(dev);
7205 enum port port = intel_encoder->port;
7208 /* Initialize the work for modeset in case of link train failure */
7209 INIT_WORK(&intel_connector->modeset_retry_work,
7210 intel_dp_modeset_retry_work_fn);
7212 if (WARN(intel_dig_port->max_lanes < 1,
7213 "Not enough lanes (%d) for DP on port %c\n",
7214 intel_dig_port->max_lanes, port_name(port)))
7217 intel_dp_set_source_rates(intel_dp);
7219 intel_dp->reset_link_params = true;
7220 intel_dp->pps_pipe = INVALID_PIPE;
7221 intel_dp->active_pipe = INVALID_PIPE;
7223 /* intel_dp vfuncs */
7224 if (HAS_DDI(dev_priv))
7225 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
7227 /* Preserve the current hw state. */
7228 intel_dp->DP = I915_READ(intel_dp->output_reg);
7229 intel_dp->attached_connector = intel_connector;
7231 if (intel_dp_is_port_edp(dev_priv, port))
7232 type = DRM_MODE_CONNECTOR_eDP;
7234 type = DRM_MODE_CONNECTOR_DisplayPort;
7236 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7237 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
7240 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
7241 * for DP the encoder type can be set by the caller to
7242 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
7244 if (type == DRM_MODE_CONNECTOR_eDP)
7245 intel_encoder->type = INTEL_OUTPUT_EDP;
7247 /* eDP only on port B and/or C on vlv/chv */
7248 if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7249 intel_dp_is_edp(intel_dp) &&
7250 port != PORT_B && port != PORT_C))
7253 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
7254 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
7257 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
7258 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
7260 if (!HAS_GMCH(dev_priv))
7261 connector->interlace_allowed = true;
7262 connector->doublescan_allowed = 0;
7264 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
7266 intel_dp_aux_init(intel_dp);
7268 intel_connector_attach_encoder(intel_connector, intel_encoder);
7270 if (HAS_DDI(dev_priv))
7271 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
7273 intel_connector->get_hw_state = intel_connector_get_hw_state;
7275 /* init MST on ports that can support it */
7276 if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
7277 (port == PORT_B || port == PORT_C ||
7278 port == PORT_D || port == PORT_F))
7279 intel_dp_mst_encoder_init(intel_dig_port,
7280 intel_connector->base.base.id);
7282 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
7283 intel_dp_aux_fini(intel_dp);
7284 intel_dp_mst_encoder_cleanup(intel_dig_port);
7288 intel_dp_add_properties(intel_dp, connector);
7290 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
7291 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
7293 DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
7296 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
7297 * 0xd. Failure to do so will result in spurious interrupts being
7298 * generated on the port when a cable is not attached.
7300 if (IS_G45(dev_priv)) {
7301 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
7302 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
7308 drm_connector_cleanup(connector);
7313 bool intel_dp_init(struct drm_i915_private *dev_priv,
7314 i915_reg_t output_reg,
7317 struct intel_digital_port *intel_dig_port;
7318 struct intel_encoder *intel_encoder;
7319 struct drm_encoder *encoder;
7320 struct intel_connector *intel_connector;
7322 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
7323 if (!intel_dig_port)
7326 intel_connector = intel_connector_alloc();
7327 if (!intel_connector)
7328 goto err_connector_alloc;
7330 intel_encoder = &intel_dig_port->base;
7331 encoder = &intel_encoder->base;
7333 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7334 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7335 "DP %c", port_name(port)))
7336 goto err_encoder_init;
7338 intel_encoder->hotplug = intel_dp_hotplug;
7339 intel_encoder->compute_config = intel_dp_compute_config;
7340 intel_encoder->get_hw_state = intel_dp_get_hw_state;
7341 intel_encoder->get_config = intel_dp_get_config;
7342 intel_encoder->update_pipe = intel_panel_update_backlight;
7343 intel_encoder->suspend = intel_dp_encoder_suspend;
7344 if (IS_CHERRYVIEW(dev_priv)) {
7345 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
7346 intel_encoder->pre_enable = chv_pre_enable_dp;
7347 intel_encoder->enable = vlv_enable_dp;
7348 intel_encoder->disable = vlv_disable_dp;
7349 intel_encoder->post_disable = chv_post_disable_dp;
7350 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
7351 } else if (IS_VALLEYVIEW(dev_priv)) {
7352 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
7353 intel_encoder->pre_enable = vlv_pre_enable_dp;
7354 intel_encoder->enable = vlv_enable_dp;
7355 intel_encoder->disable = vlv_disable_dp;
7356 intel_encoder->post_disable = vlv_post_disable_dp;
7358 intel_encoder->pre_enable = g4x_pre_enable_dp;
7359 intel_encoder->enable = g4x_enable_dp;
7360 intel_encoder->disable = g4x_disable_dp;
7361 intel_encoder->post_disable = g4x_post_disable_dp;
7364 intel_dig_port->dp.output_reg = output_reg;
7365 intel_dig_port->max_lanes = 4;
7367 intel_encoder->type = INTEL_OUTPUT_DP;
7368 intel_encoder->power_domain = intel_port_to_power_domain(port);
7369 if (IS_CHERRYVIEW(dev_priv)) {
7371 intel_encoder->crtc_mask = 1 << 2;
7373 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
7375 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
7377 intel_encoder->cloneable = 0;
7378 intel_encoder->port = port;
7380 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
7383 intel_infoframe_init(intel_dig_port);
7385 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
7386 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7387 goto err_init_connector;
7392 drm_encoder_cleanup(encoder);
7394 kfree(intel_connector);
7395 err_connector_alloc:
7396 kfree(intel_dig_port);
7400 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7402 struct intel_encoder *encoder;
7404 for_each_intel_encoder(&dev_priv->drm, encoder) {
7405 struct intel_dp *intel_dp;
7407 if (encoder->type != INTEL_OUTPUT_DDI)
7410 intel_dp = enc_to_intel_dp(&encoder->base);
7412 if (!intel_dp->can_mst)
7415 if (intel_dp->is_mst)
7416 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7420 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7422 struct intel_encoder *encoder;
7424 for_each_intel_encoder(&dev_priv->drm, encoder) {
7425 struct intel_dp *intel_dp;
7428 if (encoder->type != INTEL_OUTPUT_DDI)
7431 intel_dp = enc_to_intel_dp(&encoder->base);
7433 if (!intel_dp->can_mst)
7436 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
7438 intel_dp->is_mst = false;
7439 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,