/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
#define DP_DSC_MIN_SUPPORTED_BPC		8
#define DP_DSC_MAX_SUPPORTED_BPC		10
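
/*
 * Illustrative arithmetic (comment added for clarity, not in the original
 * file): 2 FIFOs x 640 x 6 bytes = 7680 bytes = 61440 bits, so the small
 * joiner RAM buffer size above is expressed in bits.
 */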

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		976
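
/*
 * Illustrative arithmetic (comment added for clarity, not in the original
 * file): FEC consumes roughly 2.4% of the link bandwidth, so only 976/1000
 * of the raw rate carries pixel data; e.g. a 540000 kHz x 4 lane link keeps
 * about 540000 * 4 * 976 / 1000 = 2108160 kB/s of post-FEC payload capacity.
 */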

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we only provide the fixed rates, not the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires fractional division to be programmed for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With a single pipe configuration, the HW can support a maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */
	return max_link_clock * max_lanes;
}
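
/*
 * Illustrative example (comment added for clarity, not in the original
 * file): a 2560x1440@60 mode with a 241500 kHz dot clock at 24 bpp needs
 * intel_dp_link_required(241500, 24) = 241500 * 24 / 8 = 724500 kB/s,
 * while an HBR2 link (540000 kHz symbol clock) on 4 lanes provides
 * intel_dp_max_data_rate(540000, 4) = 2160000 kB/s, so the mode fits.
 */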

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
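
/*
 * Illustrative example (comment added for clarity, not in the original
 * file): with sorted inputs source = { 162000, 216000, 270000, 540000 }
 * and sink = { 162000, 270000, 540000, 810000 }, the two-pointer walk
 * above produces common = { 162000, 270000, 540000 } and returns k = 3.
 */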

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
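
/*
 * Illustrative example (comment added for clarity, not in the original
 * file): the AUX data registers hold big-endian dwords, so
 * intel_dp_pack_aux() of bytes { 0x12, 0x34 } returns 0x12340000, and
 * intel_dp_unpack_aux(0x12340000, dst, 2) writes 0x12 and 0x34 back into
 * dst[0] and dst[1].
 */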

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
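
/*
 * Note (comment added for clarity, not in the original file): the for-loop
 * construct above runs its body exactly once; pps_lock() returns a non-zero
 * wakeref (entering the loop) and pps_unlock() returns 0 (terminating it),
 * so the body executes with pps_mutex and a power reference held. See
 * edp_notify_handler() below for a typical user.
 */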

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power
	 * domain reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/*
 * Reboot notifier handler to shutdown panel power to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not to be
 * tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
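
/*
 * Illustrative arithmetic (comment added for clarity, not in the original
 * file): with a 24000 kHz hrawclk, DIV_ROUND_CLOSEST(24000, 2000) = 12,
 * which divides the 24 MHz clock down to the desired 2 MHz AUX bit clock.
 */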

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(intel_dig_port);
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(intel_dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened, so we return -EBUSY so
	 * the drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(intel_dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
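
/*
 * Illustrative example (comment added for clarity, not in the original
 * file): a native AUX read (request 0x9) of 16 bytes from DPCD address
 * 0x000000 is encoded as txbuf[] = { 0x90, 0x00, 0x00, 0x0f }: the request
 * sits in the high nibble, the 20-bit address spans the low nibble and the
 * next two bytes, and the last byte holds size - 1.
 */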

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
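
/*
 * Note (comment added for clarity, not in the original file): DP link-bw
 * codes only exist for the standard rates (1.62, 2.7, 5.4 and 8.1 Gbps per
 * lane), so an eDP 1.4 sink offering intermediate rates such as 216000 or
 * 432000 kHz must be driven via the rate select method, with link_bw set
 * to 0 as above.
 */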

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 11 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 10 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
		return false;

	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3 * bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests. */
static void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
{
	/*
	 * The bpp value was computed assuming RGB output. For YCbCr 4:2:0
	 * output the effective number of bits per pixel is half that of an
	 * RGB pixel.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}
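
/*
 * Illustrative example (comment added for clarity, not in the original
 * file): a 24 bpp RGB configuration becomes 12 bpp when output as
 * YCbCr 4:2:0, since at 8 bpc each pixel carries 8 bits of luma while the
 * two 8-bit chroma samples are shared between four pixels (4 bits/pixel).
 */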

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
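
/*
 * Illustrative walk (comment added for clarity, not in the original file):
 * for a given bpp the loops above try 162000 x 1, 162000 x 2, 162000 x 4,
 * then 270000 x 1, and so on, accepting the first combination with enough
 * bandwidth; this favours a lower link rate with more lanes, i.e. "slow
 * and wide", before stepping bpp down and retrying.
 */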

static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}

static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
			    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc_params.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc_params.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc_params.slice_count > 1) {
			pipe_config->dsc_params.dsc_split = true;
		} else {
			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
			      "Compressed BPP = %d\n",
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);
		return ret;
	}

	pipe_config->dsc_params.compression_enable = true;
	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
		      "Compressed Bpp = %d Slice Count = %d\n",
		      pipe_config->pipe_bpp,
		      pipe_config->dsc_params.compressed_bpp,
		      pipe_config->dsc_params.slice_count);

	return 0;
}

int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config);
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max rate %d max bpp %d pixel clock %iKHz\n",
		      limits.max_lane_count,
		      intel_dp->common_rates[limits.max_clock],
		      limits.max_bpp, adjusted_mode->crtc_clock);

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->dsc_params.compressed_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	} else {
		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->pipe_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	}
	return 0;
}

static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
			 struct drm_connector *connector,
			 struct intel_crtc_state *crtc_state)
{
	const struct drm_display_info *info = &connector->display_info;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	int ret;

	if (!drm_mode_is_420_only(info, adjusted_mode) ||
	    !intel_dp_get_colorimetry_status(intel_dp) ||
	    !connector->ycbcr_420_allowed)
		return 0;

	crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	/* YCBCR 420 output conversion needs a scaler */
	ret = skl_update_scaler_crtc(crtc_state);
	if (ret) {
		DRM_DEBUG_KMS("Scaler allocation for output failed\n");
		return ret;
	}

	intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);

	return 0;
}

bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
	else
		ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
					       pipe_config);

	if (ret)
		return ret;

	pipe_config->has_drrs = false;
	if (IS_G4X(dev_priv) || port == PORT_A)
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_GEN(dev_priv) >= 9) {
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH(dev_priv))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 conn_state->scaling_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						conn_state->scaling_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc_params.compression_enable)
		output_bpp = pipe_config->dsc_params.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n);

	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(output_bpp,
					       pipe_config->lane_count,
					       intel_connector->panel.downclock_mode->clock,
					       pipe_config->port_clock,
					       &pipe_config->dp_m2_n2,
					       constant_n);
	}

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);

	return 0;
}
2247 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2248 int link_rate, u8 lane_count,
2251 intel_dp->link_trained = false;
2252 intel_dp->link_rate = link_rate;
2253 intel_dp->lane_count = lane_count;
2254 intel_dp->link_mst = link_mst;
2257 static void intel_dp_prepare(struct intel_encoder *encoder,
2258 const struct intel_crtc_state *pipe_config)
2260 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2261 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2262 enum port port = encoder->port;
2263 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2264 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2266 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2267 pipe_config->lane_count,
2268 intel_crtc_has_type(pipe_config,
2269 INTEL_OUTPUT_DP_MST));
2272 * There are four kinds of DP registers:
2279 * IBX PCH and CPU are the same for almost everything,
2280 * except that the CPU DP PLL is configured in this
2283 * CPT PCH is quite different, having many bits moved
2284 * to the TRANS_DP_CTL register instead. That
2285 * configuration happens (oddly) in ironlake_pch_enable
2288 /* Preserve the BIOS-computed detected bit. This is
2289 * supposed to be read-only.
2291 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2293 /* Handle DP bits in common between all three register formats */
2294 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2295 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2297 /* Split out the IBX/CPU vs CPT settings */
2299 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2300 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2301 intel_dp->DP |= DP_SYNC_HS_HIGH;
2302 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2303 intel_dp->DP |= DP_SYNC_VS_HIGH;
2304 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2306 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2307 intel_dp->DP |= DP_ENHANCED_FRAMING;
2309 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2310 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2313 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2315 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2316 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2317 trans_dp |= TRANS_DP_ENH_FRAMING;
2319 trans_dp &= ~TRANS_DP_ENH_FRAMING;
2320 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2322 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2323 intel_dp->DP |= DP_COLOR_RANGE_16_235;
2325 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2326 intel_dp->DP |= DP_SYNC_HS_HIGH;
2327 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2328 intel_dp->DP |= DP_SYNC_VS_HIGH;
2329 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2331 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2332 intel_dp->DP |= DP_ENHANCED_FRAMING;
2334 if (IS_CHERRYVIEW(dev_priv))
2335 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2337 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2341 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
2342 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
2344 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
2345 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
2347 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2348 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
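/*
 * Editor's note: each IDLE_*_MASK / IDLE_*_VALUE pair above expresses
 * "poll PP_STATUS until the masked bits equal the value", roughly the
 * loop sketched below (wait_panel_status() adds the actual timeout).
 */
#if 0
	while ((I915_READ(pp_stat_reg) & mask) != value)
		usleep_range(1000, 2000);
#endif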
2350 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2352 static void wait_panel_status(struct intel_dp *intel_dp,
2356 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2357 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2359 lockdep_assert_held(&dev_priv->pps_mutex);
2361 intel_pps_verify_state(intel_dp);
2363 pp_stat_reg = _pp_stat_reg(intel_dp);
2364 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2366 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2368 I915_READ(pp_stat_reg),
2369 I915_READ(pp_ctrl_reg));
2371 if (intel_wait_for_register(&dev_priv->uncore,
2372 pp_stat_reg, mask, value,
2374 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2375 I915_READ(pp_stat_reg),
2376 I915_READ(pp_ctrl_reg));
2378 DRM_DEBUG_KMS("Wait complete\n");
2381 static void wait_panel_on(struct intel_dp *intel_dp)
2383 DRM_DEBUG_KMS("Wait for panel power on\n");
2384 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2387 static void wait_panel_off(struct intel_dp *intel_dp)
2389 DRM_DEBUG_KMS("Wait for panel power off time\n");
2390 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2393 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2395 ktime_t panel_power_on_time;
2396 s64 panel_power_off_duration;
2398 DRM_DEBUG_KMS("Wait for panel power cycle\n");
2400 /* Take the difference between the current time and the panel power off
2401 * time, and then make the panel wait for t11_t12 if needed. */
2402 panel_power_on_time = ktime_get_boottime();
2403 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2405 /* When we disable the VDD override bit last we have to do the manual
2407 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2408 wait_remaining_ms_from_jiffies(jiffies,
2409 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2411 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
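/*
 * Worked example (editor's note, assumed numbers): with a 500ms
 * panel_power_cycle_delay (T12) and a panel that has been off for 180ms,
 * only the remaining 500 - 180 = 320ms are waited out above.
 */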
2414 static void wait_backlight_on(struct intel_dp *intel_dp)
2416 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2417 intel_dp->backlight_on_delay);
2420 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2422 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2423 intel_dp->backlight_off_delay);
2426 /* Read the current pp_control value, unlocking the register if it
2430 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2432 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2435 lockdep_assert_held(&dev_priv->pps_mutex);
2437 control = I915_READ(_pp_ctrl_reg(intel_dp));
2438 if (WARN_ON(!HAS_DDI(dev_priv) &&
2439 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2440 control &= ~PANEL_UNLOCK_MASK;
2441 control |= PANEL_UNLOCK_REGS;
2447 * Must be paired with edp_panel_vdd_off().
2448 * Must hold pps_mutex around the whole on/off sequence.
2449 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2451 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2453 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2454 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2456 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2457 bool need_to_disable = !intel_dp->want_panel_vdd;
2459 lockdep_assert_held(&dev_priv->pps_mutex);
2461 if (!intel_dp_is_edp(intel_dp))
2464 cancel_delayed_work(&intel_dp->panel_vdd_work);
2465 intel_dp->want_panel_vdd = true;
2467 if (edp_have_panel_vdd(intel_dp))
2468 return need_to_disable;
2470 intel_display_power_get(dev_priv,
2471 intel_aux_power_domain(intel_dig_port));
2473 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2474 port_name(intel_dig_port->base.port));
2476 if (!edp_have_panel_power(intel_dp))
2477 wait_panel_power_cycle(intel_dp);
2479 pp = ironlake_get_pp_control(intel_dp);
2480 pp |= EDP_FORCE_VDD;
2482 pp_stat_reg = _pp_stat_reg(intel_dp);
2483 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2485 I915_WRITE(pp_ctrl_reg, pp);
2486 POSTING_READ(pp_ctrl_reg);
2487 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2488 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2490 * If the panel wasn't on, delay before accessing aux channel
2492 if (!edp_have_panel_power(intel_dp)) {
2493 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2494 port_name(intel_dig_port->base.port));
2495 msleep(intel_dp->panel_power_up_delay);
2498 return need_to_disable;
2502 * Must be paired with intel_edp_panel_vdd_off() or
2503 * intel_edp_panel_off().
2504 * Nested calls to these functions are not allowed since
2505 * we drop the lock. Caller must use some higher level
2506 * locking to prevent nested calls from other threads.
2508 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2510 intel_wakeref_t wakeref;
2513 if (!intel_dp_is_edp(intel_dp))
2517 with_pps_lock(intel_dp, wakeref)
2518 vdd = edp_panel_vdd_on(intel_dp);
2519 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2520 port_name(dp_to_dig_port(intel_dp)->base.port));
2523 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2525 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2526 struct intel_digital_port *intel_dig_port =
2527 dp_to_dig_port(intel_dp);
2529 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2531 lockdep_assert_held(&dev_priv->pps_mutex);
2533 WARN_ON(intel_dp->want_panel_vdd);
2535 if (!edp_have_panel_vdd(intel_dp))
2538 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2539 port_name(intel_dig_port->base.port));
2541 pp = ironlake_get_pp_control(intel_dp);
2542 pp &= ~EDP_FORCE_VDD;
2544 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2545 pp_stat_reg = _pp_stat_reg(intel_dp);
2547 I915_WRITE(pp_ctrl_reg, pp);
2548 POSTING_READ(pp_ctrl_reg);
2550 /* Make sure sequencer is idle before allowing subsequent activity */
2551 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2552 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2554 if ((pp & PANEL_POWER_ON) == 0)
2555 intel_dp->panel_power_off_time = ktime_get_boottime();
2557 intel_display_power_put_unchecked(dev_priv,
2558 intel_aux_power_domain(intel_dig_port));
2561 static void edp_panel_vdd_work(struct work_struct *__work)
2563 struct intel_dp *intel_dp =
2564 container_of(to_delayed_work(__work),
2565 struct intel_dp, panel_vdd_work);
2566 intel_wakeref_t wakeref;
2568 with_pps_lock(intel_dp, wakeref) {
2569 if (!intel_dp->want_panel_vdd)
2570 edp_panel_vdd_off_sync(intel_dp);
2574 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2576 unsigned long delay;
2579 * Queue the timer to fire a long time from now (relative to the power
2580 * down delay) to keep the panel power up across a sequence of
2583 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2584 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
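/*
 * Worked example (editor's note): with an assumed 500ms
 * panel_power_cycle_delay, the VDD off work is queued 5 * 500ms = 2.5s
 * out, so a burst of AUX transactions never cycles panel power.
 */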
2588 * Must be paired with edp_panel_vdd_on().
2589 * Must hold pps_mutex around the whole on/off sequence.
2590 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2592 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2594 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2596 lockdep_assert_held(&dev_priv->pps_mutex);
2598 if (!intel_dp_is_edp(intel_dp))
2601 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2602 port_name(dp_to_dig_port(intel_dp)->base.port));
2604 intel_dp->want_panel_vdd = false;
2607 edp_panel_vdd_off_sync(intel_dp);
2609 edp_panel_vdd_schedule_off(intel_dp);
2612 static void edp_panel_on(struct intel_dp *intel_dp)
2614 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2616 i915_reg_t pp_ctrl_reg;
2618 lockdep_assert_held(&dev_priv->pps_mutex);
2620 if (!intel_dp_is_edp(intel_dp))
2623 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2624 port_name(dp_to_dig_port(intel_dp)->base.port));
2626 if (WARN(edp_have_panel_power(intel_dp),
2627 "eDP port %c panel power already on\n",
2628 port_name(dp_to_dig_port(intel_dp)->base.port)))
2631 wait_panel_power_cycle(intel_dp);
2633 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2634 pp = ironlake_get_pp_control(intel_dp);
2635 if (IS_GEN(dev_priv, 5)) {
2636 /* ILK workaround: disable reset around power sequence */
2637 pp &= ~PANEL_POWER_RESET;
2638 I915_WRITE(pp_ctrl_reg, pp);
2639 POSTING_READ(pp_ctrl_reg);
2642 pp |= PANEL_POWER_ON;
2643 if (!IS_GEN(dev_priv, 5))
2644 pp |= PANEL_POWER_RESET;
2646 I915_WRITE(pp_ctrl_reg, pp);
2647 POSTING_READ(pp_ctrl_reg);
2649 wait_panel_on(intel_dp);
2650 intel_dp->last_power_on = jiffies;
2652 if (IS_GEN(dev_priv, 5)) {
2653 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2654 I915_WRITE(pp_ctrl_reg, pp);
2655 POSTING_READ(pp_ctrl_reg);
2659 void intel_edp_panel_on(struct intel_dp *intel_dp)
2661 intel_wakeref_t wakeref;
2663 if (!intel_dp_is_edp(intel_dp))
2666 with_pps_lock(intel_dp, wakeref)
2667 edp_panel_on(intel_dp);
2671 static void edp_panel_off(struct intel_dp *intel_dp)
2673 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2674 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2676 i915_reg_t pp_ctrl_reg;
2678 lockdep_assert_held(&dev_priv->pps_mutex);
2680 if (!intel_dp_is_edp(intel_dp))
2683 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2684 port_name(dig_port->base.port));
2686 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2687 port_name(dig_port->base.port));
2689 pp = ironlake_get_pp_control(intel_dp);
2690 /* We need to switch off panel power _and_ force vdd, for otherwise some
2691 * panels get very unhappy and cease to work. */
2692 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2695 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2697 intel_dp->want_panel_vdd = false;
2699 I915_WRITE(pp_ctrl_reg, pp);
2700 POSTING_READ(pp_ctrl_reg);
2702 wait_panel_off(intel_dp);
2703 intel_dp->panel_power_off_time = ktime_get_boottime();
2705 /* We got a reference when we enabled the VDD. */
2706 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2709 void intel_edp_panel_off(struct intel_dp *intel_dp)
2711 intel_wakeref_t wakeref;
2713 if (!intel_dp_is_edp(intel_dp))
2716 with_pps_lock(intel_dp, wakeref)
2717 edp_panel_off(intel_dp);
2720 /* Enable backlight in the panel power control. */
2721 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2723 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2724 intel_wakeref_t wakeref;
2727 * If we enable the backlight right away following a panel power
2728 * on, we may see slight flicker as the panel syncs with the eDP
2729 * link. So delay a bit to make sure the image is solid before
2730 * allowing it to appear.
2732 wait_backlight_on(intel_dp);
2734 with_pps_lock(intel_dp, wakeref) {
2735 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2738 pp = ironlake_get_pp_control(intel_dp);
2739 pp |= EDP_BLC_ENABLE;
2741 I915_WRITE(pp_ctrl_reg, pp);
2742 POSTING_READ(pp_ctrl_reg);
2746 /* Enable backlight PWM and backlight PP control. */
2747 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2748 const struct drm_connector_state *conn_state)
2750 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2752 if (!intel_dp_is_edp(intel_dp))
2755 DRM_DEBUG_KMS("\n");
2757 intel_panel_enable_backlight(crtc_state, conn_state);
2758 _intel_edp_backlight_on(intel_dp);
2761 /* Disable backlight in the panel power control. */
2762 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2764 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2765 intel_wakeref_t wakeref;
2767 if (!intel_dp_is_edp(intel_dp))
2770 with_pps_lock(intel_dp, wakeref) {
2771 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2774 pp = ironlake_get_pp_control(intel_dp);
2775 pp &= ~EDP_BLC_ENABLE;
2777 I915_WRITE(pp_ctrl_reg, pp);
2778 POSTING_READ(pp_ctrl_reg);
2781 intel_dp->last_backlight_off = jiffies;
2782 edp_wait_backlight_off(intel_dp);
2785 /* Disable backlight PP control and backlight PWM. */
2786 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2788 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2790 if (!intel_dp_is_edp(intel_dp))
2793 DRM_DEBUG_KMS("\n");
2795 _intel_edp_backlight_off(intel_dp);
2796 intel_panel_disable_backlight(old_conn_state);
2800 * Hook for controlling the panel power control backlight through the bl_power
2801 * sysfs attribute. Take care to handle multiple calls.
2803 static void intel_edp_backlight_power(struct intel_connector *connector,
2806 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2807 intel_wakeref_t wakeref;
2811 with_pps_lock(intel_dp, wakeref)
2812 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2813 if (is_enabled == enable)
2816 DRM_DEBUG_KMS("panel power control backlight %s\n",
2817 enable ? "enable" : "disable");
2820 _intel_edp_backlight_on(intel_dp);
2822 _intel_edp_backlight_off(intel_dp);
2825 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2827 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2828 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2829 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2831 I915_STATE_WARN(cur_state != state,
2832 "DP port %c state assertion failure (expected %s, current %s)\n",
2833 port_name(dig_port->base.port),
2834 onoff(state), onoff(cur_state));
2836 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2838 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2840 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2842 I915_STATE_WARN(cur_state != state,
2843 "eDP PLL state assertion failure (expected %s, current %s)\n",
2844 onoff(state), onoff(cur_state));
2846 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2847 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2849 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2850 const struct intel_crtc_state *pipe_config)
2852 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2853 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2855 assert_pipe_disabled(dev_priv, crtc->pipe);
2856 assert_dp_port_disabled(intel_dp);
2857 assert_edp_pll_disabled(dev_priv);
2859 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2860 pipe_config->port_clock);
2862 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2864 if (pipe_config->port_clock == 162000)
2865 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2867 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2869 I915_WRITE(DP_A, intel_dp->DP);
2874 * [DevILK] Workaround required when enabling DP PLL
2875 * while a pipe is enabled going to FDI:
2876 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2877 * 2. Program DP PLL enable
2879 if (IS_GEN(dev_priv, 5))
2880 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2882 intel_dp->DP |= DP_PLL_ENABLE;
2884 I915_WRITE(DP_A, intel_dp->DP);
2889 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2890 const struct intel_crtc_state *old_crtc_state)
2892 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2893 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2895 assert_pipe_disabled(dev_priv, crtc->pipe);
2896 assert_dp_port_disabled(intel_dp);
2897 assert_edp_pll_enabled(dev_priv);
2899 DRM_DEBUG_KMS("disabling eDP PLL\n");
2901 intel_dp->DP &= ~DP_PLL_ENABLE;
2903 I915_WRITE(DP_A, intel_dp->DP);
2908 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2911 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2912 * be capable of signalling downstream hpd with a long pulse.
2913 * Whether or not that means D3 is safe to use is not clear,
2914 * but let's assume so until proven otherwise.
2916 * FIXME should really check all downstream ports...
2918 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2919 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2920 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2923 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2924 const struct intel_crtc_state *crtc_state,
2929 if (!crtc_state->dsc_params.compression_enable)
2932 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2933 enable ? DP_DECOMPRESSION_EN : 0);
2935 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2936 enable ? "enable" : "disable");
2939 /* If the sink supports it, try to set the power state appropriately */
2940 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2944 /* Should have a valid DPCD by this point */
2945 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2948 if (mode != DRM_MODE_DPMS_ON) {
2949 if (downstream_hpd_needs_d0(intel_dp))
2952 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2955 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2958 * When turning on, we need to retry for 1ms to give the sink
2961 for (i = 0; i < 3; i++) {
2962 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2969 if (ret == 1 && lspcon->active)
2970 lspcon_wait_pcon_mode(lspcon);
2974 DRM_DEBUG_KMS("failed to %s sink power state\n",
2975 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2978 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
2979 enum port port, enum pipe *pipe)
2983 for_each_pipe(dev_priv, p) {
2984 u32 val = I915_READ(TRANS_DP_CTL(p));
2986 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
2992 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
2994 /* must initialize pipe to something for the asserts */
3000 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3001 i915_reg_t dp_reg, enum port port,
3007 val = I915_READ(dp_reg);
3009 ret = val & DP_PORT_EN;
3011 /* asserts want to know the pipe even if the port is disabled */
3012 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3013 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3014 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3015 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3016 else if (IS_CHERRYVIEW(dev_priv))
3017 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3019 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
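/*
 * Worked example (editor's note): the pipe decode above is a plain field
 * extraction; assuming DP_PIPE_SEL_MASK is bit 30 on these pre-CPT
 * platforms, a register value of 0x40000000 yields *pipe = PIPE_B.
 */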
3024 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3027 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3028 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3029 intel_wakeref_t wakeref;
3032 wakeref = intel_display_power_get_if_enabled(dev_priv,
3033 encoder->power_domain);
3037 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3038 encoder->port, pipe);
3040 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3045 static void intel_dp_get_config(struct intel_encoder *encoder,
3046 struct intel_crtc_state *pipe_config)
3048 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3049 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3051 enum port port = encoder->port;
3052 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3054 if (encoder->type == INTEL_OUTPUT_EDP)
3055 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3057 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3059 tmp = I915_READ(intel_dp->output_reg);
3061 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3063 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3064 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3066 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3067 flags |= DRM_MODE_FLAG_PHSYNC;
3069 flags |= DRM_MODE_FLAG_NHSYNC;
3071 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3072 flags |= DRM_MODE_FLAG_PVSYNC;
3074 flags |= DRM_MODE_FLAG_NVSYNC;
3076 if (tmp & DP_SYNC_HS_HIGH)
3077 flags |= DRM_MODE_FLAG_PHSYNC;
3079 flags |= DRM_MODE_FLAG_NHSYNC;
3081 if (tmp & DP_SYNC_VS_HIGH)
3082 flags |= DRM_MODE_FLAG_PVSYNC;
3084 flags |= DRM_MODE_FLAG_NVSYNC;
3087 pipe_config->base.adjusted_mode.flags |= flags;
3089 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3090 pipe_config->limited_color_range = true;
3092 pipe_config->lane_count =
3093 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3095 intel_dp_get_m_n(crtc, pipe_config);
3097 if (port == PORT_A) {
3098 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3099 pipe_config->port_clock = 162000;
3101 pipe_config->port_clock = 270000;
3104 pipe_config->base.adjusted_mode.crtc_clock =
3105 intel_dotclock_calculate(pipe_config->port_clock,
3106 &pipe_config->dp_m_n);
3108 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3109 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3111 * This is a big fat ugly hack.
3113 * Some machines in UEFI boot mode provide us a VBT that has 18
3114 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3115 * unknown we fail to light up. Yet the same BIOS boots up with
3116 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3117 * max, not what it tells us to use.
3119 * Note: This will still be broken if the eDP panel is not lit
3120 * up by the BIOS, and thus we can't get the mode at module
3123 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3124 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3125 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3129 static void intel_disable_dp(struct intel_encoder *encoder,
3130 const struct intel_crtc_state *old_crtc_state,
3131 const struct drm_connector_state *old_conn_state)
3133 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3135 intel_dp->link_trained = false;
3137 if (old_crtc_state->has_audio)
3138 intel_audio_codec_disable(encoder,
3139 old_crtc_state, old_conn_state);
3141 /* Make sure the panel is off before trying to change the mode. But also
3142 * ensure that we have vdd while we switch off the panel. */
3143 intel_edp_panel_vdd_on(intel_dp);
3144 intel_edp_backlight_off(old_conn_state);
3145 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3146 intel_edp_panel_off(intel_dp);
3149 static void g4x_disable_dp(struct intel_encoder *encoder,
3150 const struct intel_crtc_state *old_crtc_state,
3151 const struct drm_connector_state *old_conn_state)
3153 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3156 static void vlv_disable_dp(struct intel_encoder *encoder,
3157 const struct intel_crtc_state *old_crtc_state,
3158 const struct drm_connector_state *old_conn_state)
3160 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3163 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3164 const struct intel_crtc_state *old_crtc_state,
3165 const struct drm_connector_state *old_conn_state)
3167 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3168 enum port port = encoder->port;
3171 * Bspec does not list a specific disable sequence for g4x DP.
3172 * Follow the ilk+ sequence (disable pipe before the port) for
3173 * g4x DP as it does not suffer from underruns like the normal
3174 * g4x modeset sequence (disable pipe after the port).
3176 intel_dp_link_down(encoder, old_crtc_state);
3178 /* Only ilk+ has port A */
3180 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3183 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3184 const struct intel_crtc_state *old_crtc_state,
3185 const struct drm_connector_state *old_conn_state)
3187 intel_dp_link_down(encoder, old_crtc_state);
3190 static void chv_post_disable_dp(struct intel_encoder *encoder,
3191 const struct intel_crtc_state *old_crtc_state,
3192 const struct drm_connector_state *old_conn_state)
3194 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3196 intel_dp_link_down(encoder, old_crtc_state);
3198 vlv_dpio_get(dev_priv);
3200 /* Assert data lane reset */
3201 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3203 vlv_dpio_put(dev_priv);
3207 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3211 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3212 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3213 enum port port = intel_dig_port->base.port;
3214 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3216 if (dp_train_pat & train_pat_mask)
3217 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3218 dp_train_pat & train_pat_mask);
3220 if (HAS_DDI(dev_priv)) {
3221 u32 temp = I915_READ(DP_TP_CTL(port));
3223 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3224 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3226 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3228 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3229 switch (dp_train_pat & train_pat_mask) {
3230 case DP_TRAINING_PATTERN_DISABLE:
3231 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3234 case DP_TRAINING_PATTERN_1:
3235 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3237 case DP_TRAINING_PATTERN_2:
3238 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3240 case DP_TRAINING_PATTERN_3:
3241 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3243 case DP_TRAINING_PATTERN_4:
3244 temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3247 I915_WRITE(DP_TP_CTL(port), temp);
3249 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3250 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3251 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3253 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3254 case DP_TRAINING_PATTERN_DISABLE:
3255 *DP |= DP_LINK_TRAIN_OFF_CPT;
3257 case DP_TRAINING_PATTERN_1:
3258 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3260 case DP_TRAINING_PATTERN_2:
3261 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3263 case DP_TRAINING_PATTERN_3:
3264 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3265 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3270 *DP &= ~DP_LINK_TRAIN_MASK;
3272 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3273 case DP_TRAINING_PATTERN_DISABLE:
3274 *DP |= DP_LINK_TRAIN_OFF;
3276 case DP_TRAINING_PATTERN_1:
3277 *DP |= DP_LINK_TRAIN_PAT_1;
3279 case DP_TRAINING_PATTERN_2:
3280 *DP |= DP_LINK_TRAIN_PAT_2;
3282 case DP_TRAINING_PATTERN_3:
3283 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3284 *DP |= DP_LINK_TRAIN_PAT_2;
3290 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3291 const struct intel_crtc_state *old_crtc_state)
3293 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3295 /* enable with pattern 1 (as per spec) */
3297 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3300 * Magic for VLV/CHV. We _must_ first set up the register
3301 * without actually enabling the port, and then do another
3302 * write to enable the port. Otherwise link training will
3303 * fail when the power sequencer is freshly used for this port.
3305 intel_dp->DP |= DP_PORT_EN;
3306 if (old_crtc_state->has_audio)
3307 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3309 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3310 POSTING_READ(intel_dp->output_reg);
3313 static void intel_enable_dp(struct intel_encoder *encoder,
3314 const struct intel_crtc_state *pipe_config,
3315 const struct drm_connector_state *conn_state)
3317 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3318 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3319 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3320 u32 dp_reg = I915_READ(intel_dp->output_reg);
3321 enum pipe pipe = crtc->pipe;
3322 intel_wakeref_t wakeref;
3324 if (WARN_ON(dp_reg & DP_PORT_EN))
3327 with_pps_lock(intel_dp, wakeref) {
3328 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3329 vlv_init_panel_power_sequencer(encoder, pipe_config);
3331 intel_dp_enable_port(intel_dp, pipe_config);
3333 edp_panel_vdd_on(intel_dp);
3334 edp_panel_on(intel_dp);
3335 edp_panel_vdd_off(intel_dp, true);
3338 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3339 unsigned int lane_mask = 0x0;
3341 if (IS_CHERRYVIEW(dev_priv))
3342 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3344 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3348 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3349 intel_dp_start_link_train(intel_dp);
3350 intel_dp_stop_link_train(intel_dp);
3352 if (pipe_config->has_audio) {
3353 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3355 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3359 static void g4x_enable_dp(struct intel_encoder *encoder,
3360 const struct intel_crtc_state *pipe_config,
3361 const struct drm_connector_state *conn_state)
3363 intel_enable_dp(encoder, pipe_config, conn_state);
3364 intel_edp_backlight_on(pipe_config, conn_state);
3367 static void vlv_enable_dp(struct intel_encoder *encoder,
3368 const struct intel_crtc_state *pipe_config,
3369 const struct drm_connector_state *conn_state)
3371 intel_edp_backlight_on(pipe_config, conn_state);
3374 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3375 const struct intel_crtc_state *pipe_config,
3376 const struct drm_connector_state *conn_state)
3378 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3379 enum port port = encoder->port;
3381 intel_dp_prepare(encoder, pipe_config);
3383 /* Only ilk+ has port A */
3385 ironlake_edp_pll_on(intel_dp, pipe_config);
3388 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3390 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3391 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3392 enum pipe pipe = intel_dp->pps_pipe;
3393 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3395 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3397 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3400 edp_panel_vdd_off_sync(intel_dp);
3403 * VLV seems to get confused when multiple power sequencers
3404 * have the same port selected (even if only one has power/vdd
3405 * enabled). The failure manifests as vlv_wait_port_ready() failing.
3406 * CHV on the other hand doesn't seem to mind having the same port
3407 * selected in multiple power sequencers, but let's always clear the
3408 * port select when logically disconnecting a power sequencer
3411 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3412 pipe_name(pipe), port_name(intel_dig_port->base.port));
3413 I915_WRITE(pp_on_reg, 0);
3414 POSTING_READ(pp_on_reg);
3416 intel_dp->pps_pipe = INVALID_PIPE;
3419 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3422 struct intel_encoder *encoder;
3424 lockdep_assert_held(&dev_priv->pps_mutex);
3426 for_each_intel_dp(&dev_priv->drm, encoder) {
3427 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3428 enum port port = encoder->port;
3430 WARN(intel_dp->active_pipe == pipe,
3431 "stealing pipe %c power sequencer from active (e)DP port %c\n",
3432 pipe_name(pipe), port_name(port));
3434 if (intel_dp->pps_pipe != pipe)
3437 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3438 pipe_name(pipe), port_name(port));
3440 /* make sure vdd is off before we steal it */
3441 vlv_detach_power_sequencer(intel_dp);
3445 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3446 const struct intel_crtc_state *crtc_state)
3448 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3449 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3450 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3452 lockdep_assert_held(&dev_priv->pps_mutex);
3454 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3456 if (intel_dp->pps_pipe != INVALID_PIPE &&
3457 intel_dp->pps_pipe != crtc->pipe) {
3459 * If another power sequencer was being used on this
3460 * port previously make sure to turn off vdd there while
3461 * we still have control of it.
3463 vlv_detach_power_sequencer(intel_dp);
3467 * We may be stealing the power
3468 * sequencer from another port.
3470 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3472 intel_dp->active_pipe = crtc->pipe;
3474 if (!intel_dp_is_edp(intel_dp))
3477 /* now it's all ours */
3478 intel_dp->pps_pipe = crtc->pipe;
3480 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3481 pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3483 /* init power sequencer on this pipe and port */
3484 intel_dp_init_panel_power_sequencer(intel_dp);
3485 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3488 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3489 const struct intel_crtc_state *pipe_config,
3490 const struct drm_connector_state *conn_state)
3492 vlv_phy_pre_encoder_enable(encoder, pipe_config);
3494 intel_enable_dp(encoder, pipe_config, conn_state);
3497 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3498 const struct intel_crtc_state *pipe_config,
3499 const struct drm_connector_state *conn_state)
3501 intel_dp_prepare(encoder, pipe_config);
3503 vlv_phy_pre_pll_enable(encoder, pipe_config);
3506 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3507 const struct intel_crtc_state *pipe_config,
3508 const struct drm_connector_state *conn_state)
3510 chv_phy_pre_encoder_enable(encoder, pipe_config);
3512 intel_enable_dp(encoder, pipe_config, conn_state);
3514 /* Second common lane will stay alive on its own now */
3515 chv_phy_release_cl2_override(encoder);
3518 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3519 const struct intel_crtc_state *pipe_config,
3520 const struct drm_connector_state *conn_state)
3522 intel_dp_prepare(encoder, pipe_config);
3524 chv_phy_pre_pll_enable(encoder, pipe_config);
3527 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3528 const struct intel_crtc_state *old_crtc_state,
3529 const struct drm_connector_state *old_conn_state)
3531 chv_phy_post_pll_disable(encoder, old_crtc_state);
3535 * Fetch AUX CH registers 0x202 - 0x207 which contain
3536 * link status information
3539 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3541 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3542 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
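/*
 * Usage sketch (editor's note, not driver code): callers typically fetch
 * the six status bytes and hand them to the shared drm_dp helpers, e.g.:
 */
#if 0
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (intel_dp_get_link_status(intel_dp, link_status) &&
	    drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))
		; /* link is still trained, nothing to do */
#endif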
3545 /* These are source-specific values. */
3547 intel_dp_voltage_max(struct intel_dp *intel_dp)
3549 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3550 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3551 enum port port = encoder->port;
3553 if (HAS_DDI(dev_priv))
3554 return intel_ddi_dp_voltage_max(encoder);
3555 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3556 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3557 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3558 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3559 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3560 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3562 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3566 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3568 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3569 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3570 enum port port = encoder->port;
3572 if (HAS_DDI(dev_priv)) {
3573 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3574 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3575 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3576 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3577 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3578 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3579 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3580 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3581 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3582 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3584 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3586 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3587 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3588 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3589 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3590 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3591 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3592 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3594 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3597 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3598 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3599 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3600 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3601 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3602 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3603 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3604 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3606 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3611 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3613 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3614 unsigned long demph_reg_value, preemph_reg_value,
3615 uniqtranscale_reg_value;
3616 u8 train_set = intel_dp->train_set[0];
3618 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3619 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3620 preemph_reg_value = 0x0004000;
3621 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3622 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3623 demph_reg_value = 0x2B405555;
3624 uniqtranscale_reg_value = 0x552AB83A;
3626 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3627 demph_reg_value = 0x2B404040;
3628 uniqtranscale_reg_value = 0x5548B83A;
3630 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3631 demph_reg_value = 0x2B245555;
3632 uniqtranscale_reg_value = 0x5560B83A;
3634 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3635 demph_reg_value = 0x2B405555;
3636 uniqtranscale_reg_value = 0x5598DA3A;
3642 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3643 preemph_reg_value = 0x0002000;
3644 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3645 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3646 demph_reg_value = 0x2B404040;
3647 uniqtranscale_reg_value = 0x5552B83A;
3649 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3650 demph_reg_value = 0x2B404848;
3651 uniqtranscale_reg_value = 0x5580B83A;
3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3654 demph_reg_value = 0x2B404040;
3655 uniqtranscale_reg_value = 0x55ADDA3A;
3661 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3662 preemph_reg_value = 0x0000000;
3663 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3664 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3665 demph_reg_value = 0x2B305555;
3666 uniqtranscale_reg_value = 0x5570B83A;
3668 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3669 demph_reg_value = 0x2B2B4040;
3670 uniqtranscale_reg_value = 0x55ADDA3A;
3676 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3677 preemph_reg_value = 0x0006000;
3678 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3679 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3680 demph_reg_value = 0x1B405555;
3681 uniqtranscale_reg_value = 0x55ADDA3A;
3691 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3692 uniqtranscale_reg_value, 0);
3697 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3699 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3700 u32 deemph_reg_value, margin_reg_value;
3701 bool uniq_trans_scale = false;
3702 u8 train_set = intel_dp->train_set[0];
3704 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3705 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3706 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3707 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3708 deemph_reg_value = 128;
3709 margin_reg_value = 52;
3711 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3712 deemph_reg_value = 128;
3713 margin_reg_value = 77;
3715 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3716 deemph_reg_value = 128;
3717 margin_reg_value = 102;
3719 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3720 deemph_reg_value = 128;
3721 margin_reg_value = 154;
3722 uniq_trans_scale = true;
3728 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3729 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3730 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3731 deemph_reg_value = 85;
3732 margin_reg_value = 78;
3734 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3735 deemph_reg_value = 85;
3736 margin_reg_value = 116;
3738 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3739 deemph_reg_value = 85;
3740 margin_reg_value = 154;
3746 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3747 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3748 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3749 deemph_reg_value = 64;
3750 margin_reg_value = 104;
3752 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3753 deemph_reg_value = 64;
3754 margin_reg_value = 154;
3760 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3761 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3762 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3763 deemph_reg_value = 43;
3764 margin_reg_value = 154;
3774 chv_set_phy_signal_level(encoder, deemph_reg_value,
3775 margin_reg_value, uniq_trans_scale);
3781 g4x_signal_levels(u8 train_set)
3783 u32 signal_levels = 0;
3785 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3786 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3788 signal_levels |= DP_VOLTAGE_0_4;
3790 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3791 signal_levels |= DP_VOLTAGE_0_6;
3793 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3794 signal_levels |= DP_VOLTAGE_0_8;
3796 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3797 signal_levels |= DP_VOLTAGE_1_2;
3800 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3801 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3803 signal_levels |= DP_PRE_EMPHASIS_0;
3805 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3806 signal_levels |= DP_PRE_EMPHASIS_3_5;
3808 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3809 signal_levels |= DP_PRE_EMPHASIS_6;
3811 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3812 signal_levels |= DP_PRE_EMPHASIS_9_5;
3815 return signal_levels;
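/*
 * Worked example (editor's note): a train_set of
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1 maps above
 * to DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5, i.e. 0.8V swing with 3.5dB
 * pre-emphasis in the port register.
 */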
3818 /* SNB CPU eDP voltage swing and pre-emphasis control */
3820 snb_cpu_edp_signal_levels(u8 train_set)
3822 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3823 DP_TRAIN_PRE_EMPHASIS_MASK);
3824 switch (signal_levels) {
3825 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3826 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3827 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3828 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3829 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3830 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3831 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3832 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3833 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3834 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3835 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3836 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3837 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3838 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3840 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3841 "0x%x\n", signal_levels);
3842 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3846 /* IVB CPU eDP voltage swing and pre-emphasis control */
3848 ivb_cpu_edp_signal_levels(u8 train_set)
3850 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3851 DP_TRAIN_PRE_EMPHASIS_MASK);
3852 switch (signal_levels) {
3853 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3854 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3855 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3856 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3857 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3858 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3860 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3861 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3863 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3865 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3866 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3867 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3868 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3871 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3872 "0x%x\n", signal_levels);
3873 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3878 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3880 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3881 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3882 enum port port = intel_dig_port->base.port;
3883 u32 signal_levels, mask = 0;
3884 u8 train_set = intel_dp->train_set[0];
3886 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3887 signal_levels = bxt_signal_levels(intel_dp);
3888 } else if (HAS_DDI(dev_priv)) {
3889 signal_levels = ddi_signal_levels(intel_dp);
3890 mask = DDI_BUF_EMP_MASK;
3891 } else if (IS_CHERRYVIEW(dev_priv)) {
3892 signal_levels = chv_signal_levels(intel_dp);
3893 } else if (IS_VALLEYVIEW(dev_priv)) {
3894 signal_levels = vlv_signal_levels(intel_dp);
3895 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3896 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3897 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3898 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3899 signal_levels = snb_cpu_edp_signal_levels(train_set);
3900 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3902 signal_levels = g4x_signal_levels(train_set);
3903 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3907 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3909 DRM_DEBUG_KMS("Using vswing level %d\n",
3910 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3911 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3912 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3913 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3915 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3917 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3918 POSTING_READ(intel_dp->output_reg);
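/*
 * Editor's note: the DP update above is a masked read-modify-write; only
 * the swing/pre-emphasis bits change. On SNB eDP, for instance, it is
 * equivalent to:
 */
#if 0
	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	intel_dp->DP |= snb_cpu_edp_signal_levels(train_set);
#endif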
3922 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3925 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3926 struct drm_i915_private *dev_priv =
3927 to_i915(intel_dig_port->base.base.dev);
3929 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3931 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3932 POSTING_READ(intel_dp->output_reg);
3935 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3937 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3938 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3939 enum port port = intel_dig_port->base.port;
3942 if (!HAS_DDI(dev_priv))
3945 val = I915_READ(DP_TP_CTL(port));
3946 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3947 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3948 I915_WRITE(DP_TP_CTL(port), val);
3951 * On PORT_A we can have only eDP in SST mode. There the only reason
3952 * we need to set idle transmission mode is to work around a HW issue
3953 * where we enable the pipe while not in idle link-training mode.
3954 * In this case there is a requirement to wait for a minimum number of
3955 * idle patterns to be sent.
3960 if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
3961 DP_TP_STATUS_IDLE_DONE,
3962 DP_TP_STATUS_IDLE_DONE,
3964 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3968 intel_dp_link_down(struct intel_encoder *encoder,
3969 const struct intel_crtc_state *old_crtc_state)
3971 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3972 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3973 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3974 enum port port = encoder->port;
3975 u32 DP = intel_dp->DP;
3977 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3980 DRM_DEBUG_KMS("\n");
3982 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3983 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3984 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3985 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3987 DP &= ~DP_LINK_TRAIN_MASK;
3988 DP |= DP_LINK_TRAIN_PAT_IDLE;
3990 I915_WRITE(intel_dp->output_reg, DP);
3991 POSTING_READ(intel_dp->output_reg);
3993 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3994 I915_WRITE(intel_dp->output_reg, DP);
3995 POSTING_READ(intel_dp->output_reg);
3998 * HW workaround for IBX, we need to move the port
3999 * to transcoder A after disabling it to allow the
4000 * matching HDMI port to be enabled on transcoder A.
4002 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
4004 * We get CPU/PCH FIFO underruns on the other pipe when
4005 * doing the workaround. Sweep them under the rug.
4007 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4008 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4010 /* always enable with pattern 1 (as per spec) */
4011 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4012 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4013 DP_LINK_TRAIN_PAT_1;
4014 I915_WRITE(intel_dp->output_reg, DP);
4015 POSTING_READ(intel_dp->output_reg);
4018 I915_WRITE(intel_dp->output_reg, DP);
4019 POSTING_READ(intel_dp->output_reg);
4021 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4022 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4023 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4026 msleep(intel_dp->panel_power_down_delay);
4030 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4031 intel_wakeref_t wakeref;
4033 with_pps_lock(intel_dp, wakeref)
4034 intel_dp->active_pipe = INVALID_PIPE;
4039 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4044 * Prior to DP 1.3 the bit represented by
4045 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4046 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
4047 * the true capability of the panel. The only way to check is to
4048 * then compare 0000h and 2200h.
4050 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4051 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4054 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4055 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4056 DRM_ERROR("DPCD failed read at extended capabilities\n");
4060 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4061 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4065 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4068 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4069 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4071 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4075 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4077 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4078 sizeof(intel_dp->dpcd)) < 0)
4079 return false; /* aux transfer failed */
4081 intel_dp_extended_receiver_capabilities(intel_dp);
4083 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4085 return intel_dp->dpcd[DP_DPCD_REV] != 0;
4088 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4092 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4095 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4098 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4101 * Clear the cached register set to avoid using stale values
4102 * for the sinks that do not support DSC.
4104 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4106 /* Clear fec_capable to avoid using stale values */
4107 intel_dp->fec_capable = 0;
4109 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4110 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4111 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4112 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4114 sizeof(intel_dp->dsc_dpcd)) < 0)
4115 DRM_ERROR("Failed to read DPCD register 0x%x\n",
4118 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4119 (int)sizeof(intel_dp->dsc_dpcd),
4120 intel_dp->dsc_dpcd);
4122 /* FEC is supported only on DP 1.4 */
4123 if (!intel_dp_is_edp(intel_dp) &&
4124 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4125 &intel_dp->fec_capable) < 0)
4126 DRM_ERROR("Failed to read FEC DPCD register\n");
4128 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4133 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4135 struct drm_i915_private *dev_priv =
4136 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4138 /* this function is meant to be called only once */
4139 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4141 if (!intel_dp_read_dpcd(intel_dp))
4144 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4145 drm_dp_is_branch(intel_dp->dpcd));
4147 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4148 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4149 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4152 * Read the eDP display control registers.
4154 * Do this independently of the DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4155 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4156 * set, but still require eDP 1.4+ detection (e.g. for the supported
4157 * link rates method). The display control registers should read zero
4158 * if they're not supported anyway.
4160 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4161 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4162 sizeof(intel_dp->edp_dpcd))
4163 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4164 intel_dp->edp_dpcd);
4167 * This has to be called after intel_dp->edp_dpcd is filled; PSR checks
4168 * for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
4170 intel_psr_init_dpcd(intel_dp);
4172 /* Read the eDP 1.4+ supported link rates. */
4173 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4174 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4177 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4178 sink_rates, sizeof(sink_rates));
4180 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4181 int val = le16_to_cpu(sink_rates[i]);
4186 /* Value read multiplied by 200kHz gives the per-lane
4187 * link rate in kHz. The source rates are, however,
4188 * stored in terms of LS_Clk kHz. The full conversion
4189 * back to symbols is
4190 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4192 intel_dp->sink_rates[i] = (val * 200) / 10;
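/*
 * Worked example (illustrative): a DP_SUPPORTED_LINK_RATES entry of
 * 8100 (0x1FA4) gives (8100 * 200) / 10 = 162000, i.e. RBR expressed
 * as a 162000 kHz link symbol clock.
 */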
4194 intel_dp->num_sink_rates = i;
4198 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4199 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4201 if (intel_dp->num_sink_rates)
4202 intel_dp->use_rate_select = true;
4204 intel_dp_set_sink_rates(intel_dp);
4206 intel_dp_set_common_rates(intel_dp);
4208 /* Read the eDP DSC DPCD registers */
4209 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4210 intel_dp_get_dsc_sink_cap(intel_dp);
4217 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4219 if (!intel_dp_read_dpcd(intel_dp))
4222 /* Don't clobber cached eDP rates. */
4223 if (!intel_dp_is_edp(intel_dp)) {
4224 intel_dp_set_sink_rates(intel_dp);
4225 intel_dp_set_common_rates(intel_dp);
4229 * Some eDP panels do not set a valid value for the sink count, which is
4230 * why we don't care about reading it here or in intel_edp_init_dpcd().
4232 if (!intel_dp_is_edp(intel_dp)) {
4236 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4241 * The sink count can change between short HPD pulses, hence
4242 * a member variable in intel_dp tracks any changes
4243 * between short pulse interrupts.
4245 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
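/*
 * Decode sketch, assuming the DP_GET_SINK_COUNT() definition in
 * drm_dp_helper.h: bit 7 of DP_SINK_COUNT carries bit 6 of the count,
 * so a raw 0x81 decodes as ((0x81 & 0x80) >> 1) | (0x81 & 0x3f) = 65.
 */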
4248 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4249 * a dongle is present but no display. Unless we need to know
4250 * whether a dongle is present or not, we don't need to update
4251 * the downstream port information. So, an early return here saves
4252 * time by skipping operations which are not required.
4254 if (!intel_dp->sink_count)
4258 if (!drm_dp_is_branch(intel_dp->dpcd))
4259 return true; /* native DP sink */
4261 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4262 return true; /* no per-port downstream info */
4264 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4265 intel_dp->downstream_ports,
4266 DP_MAX_DOWNSTREAM_PORTS) < 0)
4267 return false; /* downstream port status fetch failed */
4273 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4277 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4280 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4283 return mstm_cap & DP_MST_CAP;
4287 intel_dp_can_mst(struct intel_dp *intel_dp)
4289 return i915_modparams.enable_dp_mst &&
4290 intel_dp->can_mst &&
4291 intel_dp_sink_can_mst(intel_dp);
4295 intel_dp_configure_mst(struct intel_dp *intel_dp)
4297 struct intel_encoder *encoder =
4298 &dp_to_dig_port(intel_dp)->base;
4299 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4301 DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4302 port_name(encoder->port), yesno(intel_dp->can_mst),
4303 yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4305 if (!intel_dp->can_mst)
4308 intel_dp->is_mst = sink_can_mst &&
4309 i915_modparams.enable_dp_mst;
4311 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4316 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4318 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4319 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4323 u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4324 int mode_clock, int mode_hdisplay)
4326 u16 bits_per_pixel, max_bpp_small_joiner_ram;
4330 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
4331 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
4332 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
4333 * for MST -> TimeSlotsPerMTP has to be calculated
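*
* e.g. (illustrative) HBR2 x4: 540000 kHz * 4 lanes * 8 bits is
* 17280000 kbit/s raw; applying the 0.976 FEC factor leaves
* ~16865280 kbit/s to divide by the pixel rate.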
4335 bits_per_pixel = (link_clock * lane_count * 8 *
4336 DP_DSC_FEC_OVERHEAD_FACTOR) /
4339 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4340 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4344 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
4345 * check, output bpp from small joiner RAM check)
4347 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
4349 /* Error out if the max bpp is less than smallest allowed valid bpp */
4350 if (bits_per_pixel < valid_dsc_bpp[0]) {
4351 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4355 /* Find the nearest match in the array of known BPPs from VESA */
4356 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4357 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4360 bits_per_pixel = valid_dsc_bpp[i];
4363 * Compressed BPP is in U6.4 format, so multiply by 16; for Gen 11
4364 * the fractional part is 0.
4366 return bits_per_pixel << 4;
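/*
 * Worked example (illustrative numbers): a 3840 pixel wide mode gives a
 * small joiner bound of 61440 / 3840 = 16 bpp, and a final value of
 * 12 bpp is returned as 12 << 4 = 0xC0, i.e. 12.0 in U6.4 fixed point.
 */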
4369 u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4373 u8 min_slice_count, i;
4374 int max_slice_width;
4376 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4377 min_slice_count = DIV_ROUND_UP(mode_clock,
4378 DP_DSC_MAX_ENC_THROUGHPUT_0);
4380 min_slice_count = DIV_ROUND_UP(mode_clock,
4381 DP_DSC_MAX_ENC_THROUGHPUT_1);
4383 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4384 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4385 DRM_DEBUG_KMS("Slice width %d not supported by the DP DSC sink device\n",
4389 /* Also take into account max slice width */
4390 min_slice_count = min_t(u8, min_slice_count,
4391 DIV_ROUND_UP(mode_hdisplay,
4394 /* Find the closest match to the valid slice count values */
4395 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4396 if (valid_dsc_slicecount[i] >
4397 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4400 if (min_slice_count <= valid_dsc_slicecount[i])
4401 return valid_dsc_slicecount[i];
4404 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
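/*
 * Slice count walk-through (illustrative): a 533250 kHz mode clock is
 * below DP_DSC_PEAK_PIXEL_RATE, so min_slice_count =
 * DIV_ROUND_UP(533250, 340000) = 2, and the loop above returns the
 * first valid_dsc_slicecount entry >= 2 that the sink supports.
 */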
4409 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
4410 const struct intel_crtc_state *crtc_state)
4412 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4413 struct dp_sdp vsc_sdp = {};
4415 /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
4416 vsc_sdp.sdp_header.HB0 = 0;
4417 vsc_sdp.sdp_header.HB1 = 0x7;
4420 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
4421 * Colorimetry Format indication.
4423 vsc_sdp.sdp_header.HB2 = 0x5;
4426 * HB3 = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
4427 * Colorimetry Format indication (HB2 = 05h).
4429 vsc_sdp.sdp_header.HB3 = 0x13;
4432 * DB16[7:4]: Pixel Encoding, YCbCr 420 = 3h. DB16[3:0]: Colorimetry
4433 * Format, ITU-R BT.601 = 0h, ITU-R BT.709 = 1h (DP 1.4a spec, Table 2-120).
4435 vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4, YCbCr 420 */
4436 /* RGB->YCBCR color conversion uses the BT.709 color space. */
4437 vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
4440 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
4441 * the following Component Bit Depth values are defined:
4447 switch (crtc_state->pipe_bpp) {
4449 vsc_sdp.db[17] = 0x1;
4451 case 30: /* 10bpc */
4452 vsc_sdp.db[17] = 0x2;
4454 case 36: /* 12bpc */
4455 vsc_sdp.db[17] = 0x3;
4457 case 48: /* 16bpc */
4458 vsc_sdp.db[17] = 0x4;
4461 MISSING_CASE(crtc_state->pipe_bpp);
4466 * Dynamic Range (Bit 7)
4467 * 0 = VESA range, 1 = CTA range.
4468 * all YCbCr are always limited range
4470 vsc_sdp.db[17] |= 0x80;
4473 * Content Type (Bits 2:0)
4474 * 000b = Not defined.
4479 * All other values are RESERVED.
4480 * Note: See CTA-861-G for the definition and expected
4481 * processing by a stream sink for the above content types.
4485 intel_dig_port->write_infoframe(&intel_dig_port->base,
4486 crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
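/*
 * For reference: a BT.709 YCbCr 4:2:0 10bpc frame thus carries
 * db[16] = (0x3 << 4) | 0x1 = 0x31 and db[17] = 0x2 | 0x80 = 0x82
 * (limited range) in the VSC SDP built above.
 */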
4489 void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4490 const struct intel_crtc_state *crtc_state)
4492 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4495 intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
4498 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4502 u8 test_lane_count, test_link_bw;
4506 /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4507 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4511 DRM_DEBUG_KMS("Lane count read failed\n");
4514 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4516 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4519 DRM_DEBUG_KMS("Link Rate read failed\n");
4522 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4524 /* Validate the requested link rate and lane count */
4525 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4529 intel_dp->compliance.test_lane_count = test_lane_count;
4530 intel_dp->compliance.test_link_rate = test_link_rate;
4535 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4539 __be16 h_width, v_height;
4542 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4543 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4546 DRM_DEBUG_KMS("Test pattern read failed\n");
4549 if (test_pattern != DP_COLOR_RAMP)
4552 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4555 DRM_DEBUG_KMS("H Width read failed\n");
4559 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4562 DRM_DEBUG_KMS("V Height read failed\n");
4566 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4569 DRM_DEBUG_KMS("TEST MISC read failed\n");
4572 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4574 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4576 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4577 case DP_TEST_BIT_DEPTH_6:
4578 intel_dp->compliance.test_data.bpc = 6;
4580 case DP_TEST_BIT_DEPTH_8:
4581 intel_dp->compliance.test_data.bpc = 8;
4587 intel_dp->compliance.test_data.video_pattern = test_pattern;
4588 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4589 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4590 /* Set test active flag here so userspace doesn't interrupt things */
4591 intel_dp->compliance.test_active = 1;
4596 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4598 u8 test_result = DP_TEST_ACK;
4599 struct intel_connector *intel_connector = intel_dp->attached_connector;
4600 struct drm_connector *connector = &intel_connector->base;
4602 if (intel_connector->detect_edid == NULL ||
4603 connector->edid_corrupt ||
4604 intel_dp->aux.i2c_defer_count > 6) {
4605 /* Check EDID read for NACKs, DEFERs and corruption
4606 * (DP CTS 1.2 Core r1.1)
4607 * 4.2.2.4 : Failed EDID read, I2C_NAK
4608 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4609 * 4.2.2.6 : EDID corruption detected
4610 * Use failsafe mode for all cases
4612 if (intel_dp->aux.i2c_nack_count > 0 ||
4613 intel_dp->aux.i2c_defer_count > 0)
4614 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4615 intel_dp->aux.i2c_nack_count,
4616 intel_dp->aux.i2c_defer_count);
4617 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4619 struct edid *block = intel_connector->detect_edid;
4621 /* We have to write the checksum
4622 * of the last block read
4624 block += intel_connector->detect_edid->extensions;
4626 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4627 block->checksum) <= 0)
4628 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4630 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4631 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4634 /* Set test active flag here so userspace doesn't interrupt things */
4635 intel_dp->compliance.test_active = 1;
4640 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4642 u8 test_result = DP_TEST_NAK;
4646 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4648 u8 response = DP_TEST_NAK;
4652 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4654 DRM_DEBUG_KMS("Could not read test request from sink\n");
4659 case DP_TEST_LINK_TRAINING:
4660 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4661 response = intel_dp_autotest_link_training(intel_dp);
4663 case DP_TEST_LINK_VIDEO_PATTERN:
4664 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4665 response = intel_dp_autotest_video_pattern(intel_dp);
4667 case DP_TEST_LINK_EDID_READ:
4668 DRM_DEBUG_KMS("EDID test requested\n");
4669 response = intel_dp_autotest_edid(intel_dp);
4671 case DP_TEST_LINK_PHY_TEST_PATTERN:
4672 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4673 response = intel_dp_autotest_phy_pattern(intel_dp);
4676 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4680 if (response & DP_TEST_ACK)
4681 intel_dp->compliance.test_type = request;
4684 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4686 DRM_DEBUG_KMS("Could not write test response to sink\n");
4690 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4694 if (intel_dp->is_mst) {
4695 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4700 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4701 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4705 /* check link status - esi[10] corresponds to DPCD 0x200c (DP_LANE0_1_STATUS_ESI) */
4706 if (intel_dp->active_mst_links > 0 &&
4707 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4708 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4709 intel_dp_start_link_train(intel_dp);
4710 intel_dp_stop_link_train(intel_dp);
4713 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4714 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4717 for (retry = 0; retry < 3; retry++) {
4719 wret = drm_dp_dpcd_write(&intel_dp->aux,
4720 DP_SINK_COUNT_ESI+1,
4727 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4729 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4737 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4738 intel_dp->is_mst = false;
4739 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4747 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4749 u8 link_status[DP_LINK_STATUS_SIZE];
4751 if (!intel_dp->link_trained)
4755 * While the PSR source HW is enabled it controls the main link,
4756 * enabling and disabling frame transmission, so attempting a retrain
4757 * can fail: the link may not be up, or training patterns could get
4758 * mixed with frame data at the same time, causing the retrain to fail.
4759 * Also, when exiting PSR the HW will retrain the link anyway, fixing
4760 * any link status error.
4762 if (intel_psr_enabled(intel_dp))
4765 if (!intel_dp_get_link_status(intel_dp, link_status))
4769 * Validate the cached values of intel_dp->link_rate and
4770 * intel_dp->lane_count before attempting to retrain.
4772 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4773 intel_dp->lane_count))
4776 /* Retrain if Channel EQ or CR not ok */
4777 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4780 int intel_dp_retrain_link(struct intel_encoder *encoder,
4781 struct drm_modeset_acquire_ctx *ctx)
4783 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4784 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4785 struct intel_connector *connector = intel_dp->attached_connector;
4786 struct drm_connector_state *conn_state;
4787 struct intel_crtc_state *crtc_state;
4788 struct intel_crtc *crtc;
4791 /* FIXME handle the MST connectors as well */
4793 if (!connector || connector->base.status != connector_status_connected)
4796 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4801 conn_state = connector->base.state;
4803 crtc = to_intel_crtc(conn_state->crtc);
4807 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4811 crtc_state = to_intel_crtc_state(crtc->base.state);
4813 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4815 if (!crtc_state->base.active)
4818 if (conn_state->commit &&
4819 !try_wait_for_completion(&conn_state->commit->hw_done))
4822 if (!intel_dp_needs_link_retrain(intel_dp))
4825 /* Suppress underruns caused by re-training */
4826 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4827 if (crtc_state->has_pch_encoder)
4828 intel_set_pch_fifo_underrun_reporting(dev_priv,
4829 intel_crtc_pch_transcoder(crtc), false);
4831 intel_dp_start_link_train(intel_dp);
4832 intel_dp_stop_link_train(intel_dp);
4834 /* Keep underrun reporting disabled until things are stable */
4835 intel_wait_for_vblank(dev_priv, crtc->pipe);
4837 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4838 if (crtc_state->has_pch_encoder)
4839 intel_set_pch_fifo_underrun_reporting(dev_priv,
4840 intel_crtc_pch_transcoder(crtc), true);
4846 * If the display is now connected, check the link status;
4847 * there have been known issues of link loss triggering
4850 * Some sinks (eg. ASUS PB287Q) seem to perform some
4851 * weird HPD ping pong during modesets. So we can apparently
4852 * end up with HPD going low during a modeset, and then
4853 * going back up soon after. And once that happens we must
4854 * retrain the link to get a picture. That covers the case where no
4855 * userspace component reacted to the intermittent HPD dip.
4857 static bool intel_dp_hotplug(struct intel_encoder *encoder,
4858 struct intel_connector *connector)
4860 struct drm_modeset_acquire_ctx ctx;
4864 changed = intel_encoder_hotplug(encoder, connector);
4866 drm_modeset_acquire_init(&ctx, 0);
4869 ret = intel_dp_retrain_link(encoder, &ctx);
4871 if (ret == -EDEADLK) {
4872 drm_modeset_backoff(&ctx);
4879 drm_modeset_drop_locks(&ctx);
4880 drm_modeset_acquire_fini(&ctx);
4881 WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4886 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4890 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4893 if (drm_dp_dpcd_readb(&intel_dp->aux,
4894 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4897 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4899 if (val & DP_AUTOMATED_TEST_REQUEST)
4900 intel_dp_handle_test_request(intel_dp);
4902 if (val & DP_CP_IRQ)
4903 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4905 if (val & DP_SINK_SPECIFIC_IRQ)
4906 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4910 * According to DP spec
4913 * 2. Configure link according to Receiver Capabilities
4914 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4915 * 4. Check link status on receipt of hot-plug interrupt
4917 * intel_dp_short_pulse - handles short pulse interrupts
4918 * when full detection is not required.
4919 * Returns %true if the short pulse was handled and full detection
4920 * is NOT required, %false otherwise.
4923 intel_dp_short_pulse(struct intel_dp *intel_dp)
4925 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4926 u8 old_sink_count = intel_dp->sink_count;
4930 * Clear the compliance test variables to allow capturing
4931 * of values for the next automated test request.
4933 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4936 * Now read the DPCD to see if it's actually running.
4937 * If the current value of the sink count doesn't match
4938 * the value that was stored earlier, or the DPCD read failed,
4939 * we need to do a full detection.
4941 ret = intel_dp_get_dpcd(intel_dp);
4943 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4944 /* No need to proceed if we are going to do full detect */
4948 intel_dp_check_service_irq(intel_dp);
4950 /* Handle CEC interrupts, if any */
4951 drm_dp_cec_irq(&intel_dp->aux);
4953 /* defer to the hotplug work for link retraining if needed */
4954 if (intel_dp_needs_link_retrain(intel_dp))
4957 intel_psr_short_pulse(intel_dp);
4959 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4960 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4961 /* Send a Hotplug Uevent to userspace to start modeset */
4962 drm_kms_helper_hotplug_event(&dev_priv->drm);
4968 /* XXX this is probably wrong for multiple downstream ports */
4969 static enum drm_connector_status
4970 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4972 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4973 u8 *dpcd = intel_dp->dpcd;
4976 if (WARN_ON(intel_dp_is_edp(intel_dp)))
4977 return connector_status_connected;
4980 lspcon_resume(lspcon);
4982 if (!intel_dp_get_dpcd(intel_dp))
4983 return connector_status_disconnected;
4985 /* if there's no downstream port, we're done */
4986 if (!drm_dp_is_branch(dpcd))
4987 return connector_status_connected;
4989 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4990 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4991 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4993 return intel_dp->sink_count ?
4994 connector_status_connected : connector_status_disconnected;
4997 if (intel_dp_can_mst(intel_dp))
4998 return connector_status_connected;
5000 /* If no HPD, poke DDC gently */
5001 if (drm_probe_ddc(&intel_dp->aux.ddc))
5002 return connector_status_connected;
5004 /* Well we tried, say unknown for unreliable port types */
5005 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5006 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5007 if (type == DP_DS_PORT_TYPE_VGA ||
5008 type == DP_DS_PORT_TYPE_NON_EDID)
5009 return connector_status_unknown;
5011 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5012 DP_DWN_STRM_PORT_TYPE_MASK;
5013 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5014 type == DP_DWN_STRM_PORT_TYPE_OTHER)
5015 return connector_status_unknown;
5018 /* Anything else is out of spec, warn and ignore */
5019 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
5020 return connector_status_disconnected;
5023 static enum drm_connector_status
5024 edp_detect(struct intel_dp *intel_dp)
5026 return connector_status_connected;
5029 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5031 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5034 switch (encoder->hpd_pin) {
5036 bit = SDE_PORTB_HOTPLUG;
5039 bit = SDE_PORTC_HOTPLUG;
5042 bit = SDE_PORTD_HOTPLUG;
5045 MISSING_CASE(encoder->hpd_pin);
5049 return I915_READ(SDEISR) & bit;
5052 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
5054 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5057 switch (encoder->hpd_pin) {
5059 bit = SDE_PORTB_HOTPLUG_CPT;
5062 bit = SDE_PORTC_HOTPLUG_CPT;
5065 bit = SDE_PORTD_HOTPLUG_CPT;
5068 MISSING_CASE(encoder->hpd_pin);
5072 return I915_READ(SDEISR) & bit;
5075 static bool spt_digital_port_connected(struct intel_encoder *encoder)
5077 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5080 switch (encoder->hpd_pin) {
5082 bit = SDE_PORTA_HOTPLUG_SPT;
5085 bit = SDE_PORTE_HOTPLUG_SPT;
5088 return cpt_digital_port_connected(encoder);
5091 return I915_READ(SDEISR) & bit;
5094 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5096 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5099 switch (encoder->hpd_pin) {
5101 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5104 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5107 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5110 MISSING_CASE(encoder->hpd_pin);
5114 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5117 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5119 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5122 switch (encoder->hpd_pin) {
5124 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5127 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5130 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5133 MISSING_CASE(encoder->hpd_pin);
5137 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5140 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5142 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5144 if (encoder->hpd_pin == HPD_PORT_A)
5145 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5147 return ibx_digital_port_connected(encoder);
5150 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5152 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5154 if (encoder->hpd_pin == HPD_PORT_A)
5155 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5157 return cpt_digital_port_connected(encoder);
5160 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5162 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5164 if (encoder->hpd_pin == HPD_PORT_A)
5165 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5167 return cpt_digital_port_connected(encoder);
5170 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5172 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5174 if (encoder->hpd_pin == HPD_PORT_A)
5175 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5177 return cpt_digital_port_connected(encoder);
5180 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5182 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5185 switch (encoder->hpd_pin) {
5187 bit = BXT_DE_PORT_HP_DDIA;
5190 bit = BXT_DE_PORT_HP_DDIB;
5193 bit = BXT_DE_PORT_HP_DDIC;
5196 MISSING_CASE(encoder->hpd_pin);
5200 return I915_READ(GEN8_DE_PORT_ISR) & bit;
5203 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5204 struct intel_digital_port *intel_dig_port)
5206 enum port port = intel_dig_port->base.port;
5208 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5211 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5213 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5214 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5215 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
5217 if (intel_phy_is_combo(dev_priv, phy))
5218 return icl_combo_port_connected(dev_priv, dig_port);
5219 else if (intel_phy_is_tc(dev_priv, phy))
5220 return intel_tc_port_connected(dig_port);
5222 MISSING_CASE(encoder->hpd_pin);
5228 * intel_digital_port_connected - is the specified port connected?
5229 * @encoder: intel_encoder
5231 * In cases where there's a connector physically connected but it can't be used
5232 * by our hardware we also return false, since the rest of the driver should
5233 * pretty much treat the port as disconnected. This is relevant for type-C
5234 * (starting on ICL) where there's ownership involved.
5236 * Return %true if port is connected, %false otherwise.
5238 static bool __intel_digital_port_connected(struct intel_encoder *encoder)
5240 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5242 if (HAS_GMCH(dev_priv)) {
5243 if (IS_GM45(dev_priv))
5244 return gm45_digital_port_connected(encoder);
5246 return g4x_digital_port_connected(encoder);
5249 if (INTEL_GEN(dev_priv) >= 11)
5250 return icl_digital_port_connected(encoder);
5251 else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5252 return spt_digital_port_connected(encoder);
5253 else if (IS_GEN9_LP(dev_priv))
5254 return bxt_digital_port_connected(encoder);
5255 else if (IS_GEN(dev_priv, 8))
5256 return bdw_digital_port_connected(encoder);
5257 else if (IS_GEN(dev_priv, 7))
5258 return ivb_digital_port_connected(encoder);
5259 else if (IS_GEN(dev_priv, 6))
5260 return snb_digital_port_connected(encoder);
5261 else if (IS_GEN(dev_priv, 5))
5262 return ilk_digital_port_connected(encoder);
5264 MISSING_CASE(INTEL_GEN(dev_priv));
5268 bool intel_digital_port_connected(struct intel_encoder *encoder)
5270 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5271 bool is_connected = false;
5272 intel_wakeref_t wakeref;
5274 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5275 is_connected = __intel_digital_port_connected(encoder);
5277 return is_connected;
5280 static struct edid *
5281 intel_dp_get_edid(struct intel_dp *intel_dp)
5283 struct intel_connector *intel_connector = intel_dp->attached_connector;
5285 /* use cached edid if we have one */
5286 if (intel_connector->edid) {
5288 if (IS_ERR(intel_connector->edid))
5291 return drm_edid_duplicate(intel_connector->edid);
5293 return drm_get_edid(&intel_connector->base,
5294 &intel_dp->aux.ddc);
5298 intel_dp_set_edid(struct intel_dp *intel_dp)
5300 struct intel_connector *intel_connector = intel_dp->attached_connector;
5303 intel_dp_unset_edid(intel_dp);
5304 edid = intel_dp_get_edid(intel_dp);
5305 intel_connector->detect_edid = edid;
5307 intel_dp->has_audio = drm_detect_monitor_audio(edid);
5308 drm_dp_cec_set_edid(&intel_dp->aux, edid);
5312 intel_dp_unset_edid(struct intel_dp *intel_dp)
5314 struct intel_connector *intel_connector = intel_dp->attached_connector;
5316 drm_dp_cec_unset_edid(&intel_dp->aux);
5317 kfree(intel_connector->detect_edid);
5318 intel_connector->detect_edid = NULL;
5320 intel_dp->has_audio = false;
5324 intel_dp_detect(struct drm_connector *connector,
5325 struct drm_modeset_acquire_ctx *ctx,
5328 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5329 struct intel_dp *intel_dp = intel_attached_dp(connector);
5330 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5331 struct intel_encoder *encoder = &dig_port->base;
5332 enum drm_connector_status status;
5334 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5335 connector->base.id, connector->name);
5336 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5338 /* Can't disconnect eDP */
5339 if (intel_dp_is_edp(intel_dp))
5340 status = edp_detect(intel_dp);
5341 else if (intel_digital_port_connected(encoder))
5342 status = intel_dp_detect_dpcd(intel_dp);
5344 status = connector_status_disconnected;
5346 if (status == connector_status_disconnected) {
5347 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5348 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5350 if (intel_dp->is_mst) {
5351 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5353 intel_dp->mst_mgr.mst_state);
5354 intel_dp->is_mst = false;
5355 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5362 if (intel_dp->reset_link_params) {
5363 /* Initial max link lane count */
5364 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5366 /* Initial max link rate */
5367 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5369 intel_dp->reset_link_params = false;
5372 intel_dp_print_rates(intel_dp);
5374 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5375 if (INTEL_GEN(dev_priv) >= 11)
5376 intel_dp_get_dsc_sink_cap(intel_dp);
5378 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5379 drm_dp_is_branch(intel_dp->dpcd));
5381 intel_dp_configure_mst(intel_dp);
5383 if (intel_dp->is_mst) {
5385 * If we are in MST mode then this connector
5386 * won't appear connected or have anything
5389 status = connector_status_disconnected;
5394 * Some external monitors do not signal loss of link synchronization
5395 * with an IRQ_HPD, so force a link status check.
5397 if (!intel_dp_is_edp(intel_dp)) {
5400 ret = intel_dp_retrain_link(encoder, ctx);
5406 * Clear the NACK and defer counts so we get their exact values
5407 * while reading the EDID, as required by Compliance tests
5408 * 4.2.2.4 and 4.2.2.5.
5410 intel_dp->aux.i2c_nack_count = 0;
5411 intel_dp->aux.i2c_defer_count = 0;
5413 intel_dp_set_edid(intel_dp);
5414 if (intel_dp_is_edp(intel_dp) ||
5415 to_intel_connector(connector)->detect_edid)
5416 status = connector_status_connected;
5418 intel_dp_check_service_irq(intel_dp);
5421 if (status != connector_status_connected && !intel_dp->is_mst)
5422 intel_dp_unset_edid(intel_dp);
5428 intel_dp_force(struct drm_connector *connector)
5430 struct intel_dp *intel_dp = intel_attached_dp(connector);
5431 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5432 struct intel_encoder *intel_encoder = &dig_port->base;
5433 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5434 enum intel_display_power_domain aux_domain =
5435 intel_aux_power_domain(dig_port);
5436 intel_wakeref_t wakeref;
5438 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5439 connector->base.id, connector->name);
5440 intel_dp_unset_edid(intel_dp);
5442 if (connector->status != connector_status_connected)
5445 wakeref = intel_display_power_get(dev_priv, aux_domain);
5447 intel_dp_set_edid(intel_dp);
5449 intel_display_power_put(dev_priv, aux_domain, wakeref);
5452 static int intel_dp_get_modes(struct drm_connector *connector)
5454 struct intel_connector *intel_connector = to_intel_connector(connector);
5457 edid = intel_connector->detect_edid;
5459 int ret = intel_connector_update_modes(connector, edid);
5464 /* if eDP has no EDID, fall back to fixed mode */
5465 if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5466 intel_connector->panel.fixed_mode) {
5467 struct drm_display_mode *mode;
5469 mode = drm_mode_duplicate(connector->dev,
5470 intel_connector->panel.fixed_mode);
5472 drm_mode_probed_add(connector, mode);
5481 intel_dp_connector_register(struct drm_connector *connector)
5483 struct intel_dp *intel_dp = intel_attached_dp(connector);
5484 struct drm_device *dev = connector->dev;
5487 ret = intel_connector_register(connector);
5491 i915_debugfs_connector_add(connector);
5493 DRM_DEBUG_KMS("registering %s bus for %s\n",
5494 intel_dp->aux.name, connector->kdev->kobj.name);
5496 intel_dp->aux.dev = connector->kdev;
5497 ret = drm_dp_aux_register(&intel_dp->aux);
5499 drm_dp_cec_register_connector(&intel_dp->aux,
5500 connector->name, dev->dev);
5505 intel_dp_connector_unregister(struct drm_connector *connector)
5507 struct intel_dp *intel_dp = intel_attached_dp(connector);
5509 drm_dp_cec_unregister_connector(&intel_dp->aux);
5510 drm_dp_aux_unregister(&intel_dp->aux);
5511 intel_connector_unregister(connector);
5514 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5516 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5517 struct intel_dp *intel_dp = &intel_dig_port->dp;
5519 intel_dp_mst_encoder_cleanup(intel_dig_port);
5520 if (intel_dp_is_edp(intel_dp)) {
5521 intel_wakeref_t wakeref;
5523 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5525 * vdd might still be enabled due to the delayed vdd off.
5526 * Make sure vdd is actually turned off here.
5528 with_pps_lock(intel_dp, wakeref)
5529 edp_panel_vdd_off_sync(intel_dp);
5531 if (intel_dp->edp_notifier.notifier_call) {
5532 unregister_reboot_notifier(&intel_dp->edp_notifier);
5533 intel_dp->edp_notifier.notifier_call = NULL;
5537 intel_dp_aux_fini(intel_dp);
5540 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5542 intel_dp_encoder_flush_work(encoder);
5544 drm_encoder_cleanup(encoder);
5545 kfree(enc_to_dig_port(encoder));
5548 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5550 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5551 intel_wakeref_t wakeref;
5553 if (!intel_dp_is_edp(intel_dp))
5557 * vdd might still be enabled due to the delayed vdd off.
5558 * Make sure vdd is actually turned off here.
5560 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5561 with_pps_lock(intel_dp, wakeref)
5562 edp_panel_vdd_off_sync(intel_dp);
5565 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5569 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5570 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5571 msecs_to_jiffies(timeout));
5574 DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
5578 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5581 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5582 static const struct drm_dp_aux_msg msg = {
5583 .request = DP_AUX_NATIVE_WRITE,
5584 .address = DP_AUX_HDCP_AKSV,
5585 .size = DRM_HDCP_KSV_LEN,
5587 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5591 /* Output An first, that's easy */
5592 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5593 an, DRM_HDCP_AN_LEN);
5594 if (dpcd_ret != DRM_HDCP_AN_LEN) {
5595 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5597 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5601 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5602 * order to get it on the wire, we need to create the AUX header as if
5603 * we were writing the data, and then tickle the hardware to output the
5604 * data once the header is sent out.
5606 intel_dp_aux_header(txbuf, &msg);
5608 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5609 rxbuf, sizeof(rxbuf),
5610 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5612 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5614 } else if (ret == 0) {
5615 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5619 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5620 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5621 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5628 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5632 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5634 if (ret != DRM_HDCP_KSV_LEN) {
5635 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5636 return ret >= 0 ? -EIO : ret;
5641 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5646 * For some reason the HDMI and DP HDCP specs call this register
5647 * definition by different names. In the HDMI spec, it's called BSTATUS,
5648 * but in DP it's called BINFO.
5650 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5651 bstatus, DRM_HDCP_BSTATUS_LEN);
5652 if (ret != DRM_HDCP_BSTATUS_LEN) {
5653 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5654 return ret >= 0 ? -EIO : ret;
5660 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5665 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5668 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5669 return ret >= 0 ? -EIO : ret;
5676 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5677 bool *repeater_present)
5682 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5686 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5691 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5695 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5696 ri_prime, DRM_HDCP_RI_LEN);
5697 if (ret != DRM_HDCP_RI_LEN) {
5698 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5699 return ret >= 0 ? -EIO : ret;
5705 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5710 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5713 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5714 return ret >= 0 ? -EIO : ret;
5716 *ksv_ready = bstatus & DP_BSTATUS_READY;
5721 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5722 int num_downstream, u8 *ksv_fifo)
5727 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5728 for (i = 0; i < num_downstream; i += 3) {
5729 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
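/*
 * e.g. (illustrative) num_downstream == 7 gives reads of 15, 15 and
 * 5 bytes on successive iterations (i = 0, 3, 6), matching the
 * 3-entry KSV FIFO window.
 */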
5730 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5731 DP_AUX_HDCP_KSV_FIFO,
5732 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5735 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5737 return ret >= 0 ? -EIO : ret;
5744 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5749 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5752 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5753 DP_AUX_HDCP_V_PRIME(i), part,
5754 DRM_HDCP_V_PRIME_PART_LEN);
5755 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5756 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5757 return ret >= 0 ? -EIO : ret;
5763 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5766 /* Not used for single stream DisplayPort setups */
5771 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5776 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5779 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5783 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5787 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5793 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5797 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5801 struct hdcp2_dp_errata_stream_type {
5806 static struct hdcp2_dp_msg_data {
5809 bool msg_detectable;
5811 u32 timeout2; /* Added for the non-paired situation */
5812 } hdcp2_msg_data[] = {
5813 {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
5814 {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
5815 false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
5816 {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
5818 {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
5820 {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
5821 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
5822 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
5823 {HDCP_2_2_AKE_SEND_PAIRING_INFO,
5824 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
5825 HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
5826 {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
5827 {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
5828 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
5829 {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
5831 {HDCP_2_2_REP_SEND_RECVID_LIST,
5832 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
5833 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
5834 {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
5836 {HDCP_2_2_REP_STREAM_MANAGE,
5837 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
5839 {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
5840 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
5841 /* local define to shovel this through the write_2_2 interface */
5842 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
5843 {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
5844 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
5849 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5854 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5855 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5856 HDCP_2_2_DP_RXSTATUS_LEN);
5857 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5858 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5859 return ret >= 0 ? -EIO : ret;
5866 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
5867 u8 msg_id, bool *msg_ready)
5873 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
5878 case HDCP_2_2_AKE_SEND_HPRIME:
5879 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
5882 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
5883 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
5886 case HDCP_2_2_REP_SEND_RECVID_LIST:
5887 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
5891 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
5899 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
5900 struct hdcp2_dp_msg_data *hdcp2_msg_data)
5902 struct intel_dp *dp = &intel_dig_port->dp;
5903 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5904 u8 msg_id = hdcp2_msg_data->msg_id;
5906 bool msg_ready = false;
5908 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
5909 timeout = hdcp2_msg_data->timeout2;
5911 timeout = hdcp2_msg_data->timeout;
5914 * There is no way to detect when the CERT, LPRIME and STREAM_READY
5915 * messages become available, so wait for the timeout and read the msg.
5917 if (!hdcp2_msg_data->msg_detectable) {
5922 * Since we want to check the msg availability at the timeout,
5923 * ignore the timeout of the CP_IRQ wait itself.
5925 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
5926 ret = hdcp2_detect_msg_availability(intel_dig_port,
5927 msg_id, &msg_ready);
5933 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
5934 hdcp2_msg_data->msg_id, ret, timeout);
5939 static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
5943 for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
5944 if (hdcp2_msg_data[i].msg_id == msg_id)
5945 return &hdcp2_msg_data[i];
5951 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
5952 void *buf, size_t size)
5954 struct intel_dp *dp = &intel_dig_port->dp;
5955 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5956 unsigned int offset;
5958 ssize_t ret, bytes_to_write, len;
5959 struct hdcp2_dp_msg_data *hdcp2_msg_data;
5961 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
5962 if (!hdcp2_msg_data)
5965 offset = hdcp2_msg_data->offset;
5967 /* No msg_id in DP HDCP2.2 msgs */
5968 bytes_to_write = size - 1;
5971 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
5973 while (bytes_to_write) {
5974 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
5975 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
5977 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
5978 offset, (void *)byte, len);
5982 bytes_to_write -= ret;
5991 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
5993 u8 rx_info[HDCP_2_2_RXINFO_LEN];
5997 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5998 DP_HDCP_2_2_REG_RXINFO_OFFSET,
5999 (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6000 if (ret != HDCP_2_2_RXINFO_LEN)
6001 return ret >= 0 ? -EIO : ret;
6003 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6004 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6006 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6007 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6009 ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6010 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6011 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
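/*
 * i.e. the fixed part of hdcp2_rep_send_receiverid_list plus
 * HDCP_2_2_RECEIVER_ID_LEN (5) bytes per device; e.g. (illustrative)
 * dev_cnt == 3 adds 15 bytes of receiver IDs.
 */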
6017 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6018 u8 msg_id, void *buf, size_t size)
6020 unsigned int offset;
6022 ssize_t ret, bytes_to_recv, len;
6023 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6025 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6026 if (!hdcp2_msg_data)
6028 offset = hdcp2_msg_data->offset;
6030 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6034 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6035 ret = get_receiver_id_list_size(intel_dig_port);
6041 bytes_to_recv = size - 1;
6043 /* DP adaptation msgs have no msg_id */
6046 while (bytes_to_recv) {
6047 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6048 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6050 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6053 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6057 bytes_to_recv -= ret;
6068 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6069 bool is_repeater, u8 content_type)
6071 struct hdcp2_dp_errata_stream_type stream_type_msg;
6077 * Errata for DP: as the stream type is used for encryption, the
6078 * receiver must be informed of the stream type for decryption of the
6080 * content. A repeater will be informed of the stream type as part of
6081 * its auth later in time.
6083 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6084 stream_type_msg.stream_type = content_type;
6086 return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6087 sizeof(stream_type_msg));
6091 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6096 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6100 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6101 ret = HDCP_REAUTH_REQUEST;
6102 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6103 ret = HDCP_LINK_INTEGRITY_FAILURE;
6104 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6105 ret = HDCP_TOPOLOGY_CHANGE;
6111 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6118 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6119 DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6120 rx_caps, HDCP_2_2_RXCAPS_LEN);
6121 if (ret != HDCP_2_2_RXCAPS_LEN)
6122 return ret >= 0 ? -EIO : ret;
6124 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6125 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6131 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6132 .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6133 .read_bksv = intel_dp_hdcp_read_bksv,
6134 .read_bstatus = intel_dp_hdcp_read_bstatus,
6135 .repeater_present = intel_dp_hdcp_repeater_present,
6136 .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6137 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6138 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6139 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6140 .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6141 .check_link = intel_dp_hdcp_check_link,
6142 .hdcp_capable = intel_dp_hdcp_capable,
6143 .write_2_2_msg = intel_dp_hdcp2_write_msg,
6144 .read_2_2_msg = intel_dp_hdcp2_read_msg,
6145 .config_stream_type = intel_dp_hdcp2_config_stream_type,
6146 .check_2_2_link = intel_dp_hdcp2_check_link,
6147 .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6148 .protocol = HDCP_PROTOCOL_DP,
6151 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6153 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6154 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6156 lockdep_assert_held(&dev_priv->pps_mutex);
6158 if (!edp_have_panel_vdd(intel_dp))
6162 * The VDD bit needs a power domain reference, so if the bit is
6163 * already enabled when we boot or resume, grab this reference and
6164 * schedule a vdd off, so we don't hold on to the reference
6167 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
6168 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6170 edp_panel_vdd_schedule_off(intel_dp);
6173 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6175 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6176 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6179 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6180 encoder->port, &pipe))
6183 return INVALID_PIPE;
6186 void intel_dp_encoder_reset(struct drm_encoder *encoder)
6188 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
6189 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
6190 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
6191 intel_wakeref_t wakeref;
6193 if (!HAS_DDI(dev_priv))
6194 intel_dp->DP = I915_READ(intel_dp->output_reg);
6197 lspcon_resume(lspcon);
6199 intel_dp->reset_link_params = true;
6201 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
6202 !intel_dp_is_edp(intel_dp))
6205 with_pps_lock(intel_dp, wakeref) {
6206 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6207 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6209 if (intel_dp_is_edp(intel_dp)) {
6211 * Reinit the power sequencer, in case BIOS did
6212 * something nasty with it.
6214 intel_dp_pps_init(intel_dp);
6215 intel_edp_panel_vdd_sanitize(intel_dp);
6220 static const struct drm_connector_funcs intel_dp_connector_funcs = {
6221 .force = intel_dp_force,
6222 .fill_modes = drm_helper_probe_single_connector_modes,
6223 .atomic_get_property = intel_digital_connector_atomic_get_property,
6224 .atomic_set_property = intel_digital_connector_atomic_set_property,
6225 .late_register = intel_dp_connector_register,
6226 .early_unregister = intel_dp_connector_unregister,
6227 .destroy = intel_connector_destroy,
6228 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6229 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
6232 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6233 .detect_ctx = intel_dp_detect,
6234 .get_modes = intel_dp_get_modes,
6235 .mode_valid = intel_dp_mode_valid,
6236 .atomic_check = intel_digital_connector_atomic_check,
6239 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6240 .reset = intel_dp_encoder_reset,
6241 .destroy = intel_dp_encoder_destroy,
6245 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6247 struct intel_dp *intel_dp = &intel_dig_port->dp;
6249 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6251 * vdd off can generate a long pulse on eDP which
6252 * would require vdd on to handle it, and thus we
6253 * would end up in an endless cycle of
6254 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6256 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
6257 port_name(intel_dig_port->base.port));
6261 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
6262 port_name(intel_dig_port->base.port),
6263 long_hpd ? "long" : "short");
6266 intel_dp->reset_link_params = true;
6270 if (intel_dp->is_mst) {
6271 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6273 * If we were in MST mode, and device is not
6274 * there, get out of MST mode
6276 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6277 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6278 intel_dp->is_mst = false;
6279 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6286 if (!intel_dp->is_mst) {
6289 handled = intel_dp_short_pulse(intel_dp);
6298 /* check the VBT to see whether the eDP is on another port */
6299 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6302 * eDP is not supported on g4x, so bail out early just
6303 * for a bit of extra safety in case the VBT is bonkers.
6305 if (INTEL_GEN(dev_priv) < 5)
6308 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6311 return intel_bios_is_port_edp(dev_priv, port);
6315 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6317 struct drm_i915_private *dev_priv = to_i915(connector->dev);
6318 enum port port = dp_to_dig_port(intel_dp)->base.port;
6320 if (!IS_G4X(dev_priv) && port != PORT_A)
6321 intel_attach_force_audio_property(connector);
6323 intel_attach_broadcast_rgb_property(connector);
6324 if (HAS_GMCH(dev_priv))
6325 drm_connector_attach_max_bpc_property(connector, 6, 10);
6326 else if (INTEL_GEN(dev_priv) >= 5)
6327 drm_connector_attach_max_bpc_property(connector, 6, 12);
6329 if (intel_dp_is_edp(intel_dp)) {
6330 u32 allowed_scalers;
6332 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6333 if (!HAS_GMCH(dev_priv))
6334 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6336 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6338 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6343 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6345 intel_dp->panel_power_off_time = ktime_get_boottime();
6346 intel_dp->last_power_on = jiffies;
6347 intel_dp->last_backlight_off = jiffies;
6351 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6353 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6354 u32 pp_on, pp_off, pp_ctl;
6355 struct pps_registers regs;
6357 intel_pps_get_registers(intel_dp, ®s);
6359 pp_ctl = ironlake_get_pp_control(intel_dp);
6361 /* Ensure PPS is unlocked */
6362 if (!HAS_DDI(dev_priv))
6363 I915_WRITE(regs.pp_ctrl, pp_ctl);
6365 pp_on = I915_READ(regs.pp_on);
6366 pp_off = I915_READ(regs.pp_off);
6368 /* Pull timing values out of registers */
6369 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6370 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6371 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6372 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
6374 if (i915_mmio_reg_valid(regs.pp_div)) {
6377 pp_div = I915_READ(regs.pp_div);
6379 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
6381 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
6386 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6388 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6390 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6394 intel_pps_verify_state(struct intel_dp *intel_dp)
6396 struct edp_power_seq hw;
6397 struct edp_power_seq *sw = &intel_dp->pps_delays;
6399 intel_pps_readout_hw_state(intel_dp, &hw);
6401 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6402 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6403 DRM_ERROR("PPS state mismatch\n");
6404 intel_pps_dump_state("sw", sw);
6405 intel_pps_dump_state("hw", &hw);
6410 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6412 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6413 struct edp_power_seq cur, vbt, spec,
6414 *final = &intel_dp->pps_delays;
6416 lockdep_assert_held(&dev_priv->pps_mutex);
6418 /* already initialized? */
6419 if (final->t11_t12 != 0)
6422 intel_pps_readout_hw_state(intel_dp, &cur);
6424 intel_pps_dump_state("cur", &cur);
6426 vbt = dev_priv->vbt.edp.pps;
/* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
 * of 500ms appears to be too short. Occasionally the panel
 * just fails to power back on. Increasing the delay to 800ms
 * seems sufficient to avoid this problem.
 */
6432 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6433 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6434 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
/* T11_T12 delay is special and actually in units of 100ms, but zero
 * based in the hw (so we need to add 100 ms). But the sw vbt
 * table multiplies it with 1000 to make it in units of 100usec,
 * too.
 */
vbt.t11_t12 += 100 * 10;
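/*
 * Worked example (illustrative): a 500 ms T11_T12 from the VBT arrives
 * here as 500 * 10 = 5000 in 100 us units; the += 100 * 10 above adds
 * the 100 ms that the zero-based hardware field leaves implicit.
 */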
6443 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6444 * our hw here, which are all in 100usec. */
6445 spec.t1_t3 = 210 * 10;
6446 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6447 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6448 spec.t10 = 500 * 10;
/* This one is special and actually in units of 100ms, but zero
 * based in the hw (so we need to add 100 ms). But the sw vbt
 * table multiplies it with 1000 to make it in units of 100usec,
 * too.
 */
spec.t11_t12 = (510 + 100) * 10; /* 610 ms */
6455 intel_pps_dump_state("vbt", &vbt);
6457 /* Use the max of the register settings and vbt. If both are
6458 * unset, fall back to the spec limits. */
#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
spec.field : \
max(cur.field, vbt.field))
assign_final(t1_t3);
assign_final(t8);
assign_final(t9);
assign_final(t10);
assign_final(t11_t12);
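/* The merged delays are still in 100 us units; get_delay() below rounds
 * them up to the millisecond units consumed by the wait helpers. */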
6469 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
6470 intel_dp->panel_power_up_delay = get_delay(t1_t3);
6471 intel_dp->backlight_on_delay = get_delay(t8);
6472 intel_dp->backlight_off_delay = get_delay(t9);
6473 intel_dp->panel_power_down_delay = get_delay(t10);
6474 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
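/* e.g. a merged t11_t12 of 6100 (100 us units) yields
 * DIV_ROUND_UP(6100, 10) == 610 ms here. */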
6477 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
6478 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
6479 intel_dp->panel_power_cycle_delay);
6481 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
6482 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
* We override the HW backlight delays to 1 because we do manual waits
* on them. For T8, even BSpec recommends doing it. For T9, if we
* don't do this, we'll end up waiting for the backlight off delay
* twice: once when we do the manual sleep, and once when we disable
* the panel and wait for the PP_STATUS bit to become zero.
*/
intel_dp->backlight_on_delay = 1;
intel_dp->backlight_off_delay = 1;
* HW has only a 100msec granularity for t11_t12 so round it up
* accordingly.
*/
final->t11_t12 = roundup(final->t11_t12, 100 * 10);
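/* e.g. the 610 ms spec limit (6100 in 100 us units) rounds up to
 * 700 ms (7000). */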
6502 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
6503 bool force_disable_vdd)
6505 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6506 u32 pp_on, pp_off, port_sel = 0;
6507 int div = dev_priv->rawclk_freq / 1000;
6508 struct pps_registers regs;
6509 enum port port = dp_to_dig_port(intel_dp)->base.port;
6510 const struct edp_power_seq *seq = &intel_dp->pps_delays;
6512 lockdep_assert_held(&dev_priv->pps_mutex);
intel_pps_get_registers(intel_dp, &regs);
6517 * On some VLV machines the BIOS can leave the VDD
6518 * enabled even on power sequencers which aren't
6519 * hooked up to any port. This would mess up the
6520 * power domain tracking the first time we pick
6521 * one of these power sequencers for use since
6522 * edp_panel_vdd_on() would notice that the VDD was
6523 * already on and therefore wouldn't grab the power
6524 * domain reference. Disable VDD first to avoid this.
6525 * This also avoids spuriously turning the VDD on as
6526 * soon as the new power sequencer gets initialized.
6528 if (force_disable_vdd) {
6529 u32 pp = ironlake_get_pp_control(intel_dp);
6531 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
6533 if (pp & EDP_FORCE_VDD)
6534 DRM_DEBUG_KMS("VDD already on, disabling first\n");
6536 pp &= ~EDP_FORCE_VDD;
6538 I915_WRITE(regs.pp_ctrl, pp);
6541 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
6542 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
6543 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
6544 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
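/* pp_on and pp_off each pack two of the 100 us delay fields, mirroring
 * the layout decoded in intel_pps_readout_hw_state() above. */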
6546 /* Haswell doesn't have any port selection bits for the panel
6547 * power sequencer any more. */
6548 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6549 port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
	switch (port) {
	case PORT_A: port_sel = PANEL_PORT_SELECT_DPA; break;
	case PORT_C: port_sel = PANEL_PORT_SELECT_DPC; break;
	case PORT_D: port_sel = PANEL_PORT_SELECT_DPD; break;
	default: break;
	}
}
pp_on |= port_sel;
I915_WRITE(regs.pp_on, pp_on);
6570 I915_WRITE(regs.pp_off, pp_off);
* Compute the divisor for the pp clock, simply matching the Bspec formula.
*/
6575 if (i915_mmio_reg_valid(regs.pp_div)) {
6576 I915_WRITE(regs.pp_div,
6577 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
6578 REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
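/* Illustrative numbers, assuming a 24 MHz rawclk (rawclk_freq == 24000):
 * div == 24, so the reference divider field is programmed to
 * (100 * 24) / 2 - 1 == 1199. */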
6582 pp_ctl = I915_READ(regs.pp_ctrl);
6583 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
6584 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
6585 I915_WRITE(regs.pp_ctrl, pp_ctl);
6588 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6589 I915_READ(regs.pp_on),
6590 I915_READ(regs.pp_off),
6591 i915_mmio_reg_valid(regs.pp_div) ?
6592 I915_READ(regs.pp_div) :
6593 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
6596 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6598 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6600 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6601 vlv_initial_power_sequencer_setup(intel_dp);
6603 intel_dp_init_panel_power_sequencer(intel_dp);
6604 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6609 * intel_dp_set_drrs_state - program registers for RR switch to take effect
6610 * @dev_priv: i915 device
6611 * @crtc_state: a pointer to the active intel_crtc_state
6612 * @refresh_rate: RR to be programmed
6614 * This function gets called when refresh rate (RR) has to be changed from
6615 * one frequency to another. Switches can be between high and low RR
6616 * supported by the panel or to any other RR based on media playback (in
6617 * this case, RR value needs to be passed from user space).
* The caller of this function needs to hold dev_priv->drrs.mutex.
6621 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6622 const struct intel_crtc_state *crtc_state,
6625 struct intel_dp *intel_dp = dev_priv->drrs.dp;
6626 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6627 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
	DRM_DEBUG_KMS("Refresh rate should be a positive non-zero value\n");
6634 if (intel_dp == NULL) {
6635 DRM_DEBUG_KMS("DRRS not supported.\n");
6640 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6644 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6645 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
		refresh_rate)
	index = DRRS_LOW_RR;
6653 if (index == dev_priv->drrs.refresh_rate_type) {
6655 "DRRS requested for previously set RR...ignoring\n");
6659 if (!crtc_state->base.active) {
6660 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
6664 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6667 intel_dp_set_m_n(crtc_state, M1_N1);
6670 intel_dp_set_m_n(crtc_state, M2_N2);
DRM_ERROR("Unsupported refresh rate type\n");
6676 } else if (INTEL_GEN(dev_priv) > 6) {
6677 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6680 val = I915_READ(reg);
6681 if (index > DRRS_HIGH_RR) {
6682 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6683 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6685 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6687 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6688 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6690 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6692 I915_WRITE(reg, val);
6695 dev_priv->drrs.refresh_rate_type = index;
6697 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
6701 * intel_edp_drrs_enable - init drrs struct if supported
6702 * @intel_dp: DP struct
6703 * @crtc_state: A pointer to the active crtc state.
6705 * Initializes frontbuffer_bits and drrs.dp
6707 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6708 const struct intel_crtc_state *crtc_state)
6710 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6712 if (!crtc_state->has_drrs) {
6713 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
6717 if (dev_priv->psr.enabled) {
6718 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
6722 mutex_lock(&dev_priv->drrs.mutex);
6723 if (dev_priv->drrs.dp) {
6724 DRM_DEBUG_KMS("DRRS already enabled\n");
6728 dev_priv->drrs.busy_frontbuffer_bits = 0;
6730 dev_priv->drrs.dp = intel_dp;
6733 mutex_unlock(&dev_priv->drrs.mutex);
6737 * intel_edp_drrs_disable - Disable DRRS
6738 * @intel_dp: DP struct
6739 * @old_crtc_state: Pointer to old crtc_state.
6742 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6743 const struct intel_crtc_state *old_crtc_state)
6745 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6747 if (!old_crtc_state->has_drrs)
6750 mutex_lock(&dev_priv->drrs.mutex);
6751 if (!dev_priv->drrs.dp) {
6752 mutex_unlock(&dev_priv->drrs.mutex);
6756 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6757 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
6758 intel_dp->attached_connector->panel.fixed_mode->vrefresh);
6760 dev_priv->drrs.dp = NULL;
6761 mutex_unlock(&dev_priv->drrs.mutex);
6763 cancel_delayed_work_sync(&dev_priv->drrs.work);
6766 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6768 struct drm_i915_private *dev_priv =
6769 container_of(work, typeof(*dev_priv), drrs.work.work);
6770 struct intel_dp *intel_dp;
6772 mutex_lock(&dev_priv->drrs.mutex);
6774 intel_dp = dev_priv->drrs.dp;
* The delayed work can race with an invalidate, hence we need to
* recheck.
*/
6784 if (dev_priv->drrs.busy_frontbuffer_bits)
6787 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
6788 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6790 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6791 intel_dp->attached_connector->panel.downclock_mode->vrefresh);
6795 mutex_unlock(&dev_priv->drrs.mutex);
6799 * intel_edp_drrs_invalidate - Disable Idleness DRRS
6800 * @dev_priv: i915 device
6801 * @frontbuffer_bits: frontbuffer plane tracking bits
* This function gets called every time rendering on the given planes starts.
* Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
6806 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6808 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6809 unsigned int frontbuffer_bits)
6811 struct drm_crtc *crtc;
6814 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6817 cancel_delayed_work(&dev_priv->drrs.work);
6819 mutex_lock(&dev_priv->drrs.mutex);
6820 if (!dev_priv->drrs.dp) {
6821 mutex_unlock(&dev_priv->drrs.mutex);
6825 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6826 pipe = to_intel_crtc(crtc)->pipe;
6828 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6829 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6831 /* invalidate means busy screen hence upclock */
6832 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6833 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6834 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6836 mutex_unlock(&dev_priv->drrs.mutex);
6840 * intel_edp_drrs_flush - Restart Idleness DRRS
6841 * @dev_priv: i915 device
6842 * @frontbuffer_bits: frontbuffer plane tracking bits
* This function gets called every time rendering on the given planes has
* completed or a flip on a crtc has completed. So DRRS should be upclocked
* (LOW_RR -> HIGH_RR). Also, idleness detection should be started again,
* if no other planes are dirty.
6849 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6851 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6852 unsigned int frontbuffer_bits)
6854 struct drm_crtc *crtc;
6857 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6860 cancel_delayed_work(&dev_priv->drrs.work);
6862 mutex_lock(&dev_priv->drrs.mutex);
6863 if (!dev_priv->drrs.dp) {
6864 mutex_unlock(&dev_priv->drrs.mutex);
6868 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6869 pipe = to_intel_crtc(crtc)->pipe;
6871 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6872 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6874 /* flush means busy screen hence upclock */
6875 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6876 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6877 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6880 * flush also means no more activity hence schedule downclock, if all
6881 * other fbs are quiescent too
6883 if (!dev_priv->drrs.busy_frontbuffer_bits)
6884 schedule_delayed_work(&dev_priv->drrs.work,
6885 msecs_to_jiffies(1000));
6886 mutex_unlock(&dev_priv->drrs.mutex);
6890 * DOC: Display Refresh Rate Switching (DRRS)
6892 * Display Refresh Rate Switching (DRRS) is a power conservation feature
* which enables switching between low and high refresh rates,
6894 * dynamically, based on the usage scenario. This feature is applicable
6895 * for internal panels.
6897 * Indication that the panel supports DRRS is given by the panel EDID, which
6898 * would list multiple refresh rates for one resolution.
6900 * DRRS is of 2 types - static and seamless.
6901 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6902 * (may appear as a blink on screen) and is used in dock-undock scenario.
6903 * Seamless DRRS involves changing RR without any visual effect to the user
6904 * and can be used during normal system usage. This is done by programming
6905 * certain registers.
6907 * Support for static/seamless DRRS may be indicated in the VBT based on
6908 * inputs from the panel spec.
6910 * DRRS saves power by switching to low RR based on usage scenarios.
* The implementation is based on frontbuffer tracking. When there is a
* disturbance on the screen triggered by user activity or periodic
6914 * system activity, DRRS is disabled (RR is changed to high RR). When there is
* no movement on screen, after a timeout of 1 second, a switch to low RR is
* made.
6918 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6919 * and intel_edp_drrs_flush() are called.
6921 * DRRS can be further extended to support other internal panels and also
6922 * the scenario of video playback wherein RR is set based on the rate
6923 * requested by userspace.
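*
* Illustrative flow (an assumption drawn from the hooks above): a page flip
* ends up in intel_edp_drrs_flush(), which upclocks if needed and, once
* busy_frontbuffer_bits is empty, schedules the downclock work that drops
* the panel back to its low refresh rate after 1 second of idleness.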
6927 * intel_dp_drrs_init - Init basic DRRS work and mutex.
6928 * @connector: eDP connector
6929 * @fixed_mode: preferred mode of panel
* This function is called only once at driver load to initialize basic
* DRRS state.
*
* Returns: the downclock mode if the panel supports it, else NULL.
6936 * DRRS support is determined by the presence of downclock mode (apart
6937 * from VBT setting).
6939 static struct drm_display_mode *
6940 intel_dp_drrs_init(struct intel_connector *connector,
6941 struct drm_display_mode *fixed_mode)
6943 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
6944 struct drm_display_mode *downclock_mode = NULL;
6946 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
6947 mutex_init(&dev_priv->drrs.mutex);
if (INTEL_GEN(dev_priv) <= 6) {
	DRM_DEBUG_KMS("DRRS is only supported on Gen7 and above\n");
6954 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
6955 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
6959 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
6960 if (!downclock_mode) {
6961 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
6965 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
6967 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
6968 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
6969 return downclock_mode;
6972 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
6973 struct intel_connector *intel_connector)
6975 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6976 struct drm_device *dev = &dev_priv->drm;
6977 struct drm_connector *connector = &intel_connector->base;
6978 struct drm_display_mode *fixed_mode = NULL;
6979 struct drm_display_mode *downclock_mode = NULL;
6981 enum pipe pipe = INVALID_PIPE;
6982 intel_wakeref_t wakeref;
6985 if (!intel_dp_is_edp(intel_dp))
6988 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
* On IBX/CPT we may get here with LVDS already registered. Since the
* driver uses the only internal power sequencer available for both
* eDP and LVDS, bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
6996 if (intel_get_lvds_encoder(dev_priv)) {
6997 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
6998 DRM_INFO("LVDS was detected, not registering eDP\n");
7003 with_pps_lock(intel_dp, wakeref) {
7004 intel_dp_init_panel_power_timestamps(intel_dp);
7005 intel_dp_pps_init(intel_dp);
7006 intel_edp_panel_vdd_sanitize(intel_dp);
7009 /* Cache DPCD and EDID for edp. */
7010 has_dpcd = intel_edp_init_dpcd(intel_dp);
7013 /* if this fails, presume the device is a ghost */
7014 DRM_INFO("failed to retrieve link info, disabling eDP\n");
7018 mutex_lock(&dev->mode_config.mutex);
7019 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
7021 if (drm_add_edid_modes(connector, edid)) {
7022 drm_connector_update_edid_property(connector,
7026 edid = ERR_PTR(-EINVAL);
7029 edid = ERR_PTR(-ENOENT);
7031 intel_connector->edid = edid;
7033 fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
7035 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
/* fall back to VBT if available for eDP */
if (!fixed_mode)
	fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
7040 mutex_unlock(&dev->mode_config.mutex);
7042 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7043 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
7044 register_reboot_notifier(&intel_dp->edp_notifier);
7047 * Figure out the current pipe for the initial backlight setup.
7048 * If the current pipe isn't valid, try the PPS pipe, and if that
7049 * fails just assume pipe A.
7051 pipe = vlv_active_pipe(intel_dp);
7053 if (pipe != PIPE_A && pipe != PIPE_B)
7054 pipe = intel_dp->pps_pipe;
7056 if (pipe != PIPE_A && pipe != PIPE_B)
7059 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
7063 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
7064 intel_connector->panel.backlight.power = intel_edp_backlight_power;
7065 intel_panel_setup_backlight(connector, pipe);
7068 drm_connector_init_panel_orientation_property(
7069 connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
7074 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
7079 with_pps_lock(intel_dp, wakeref)
7080 edp_panel_vdd_off_sync(intel_dp);
7085 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7087 struct intel_connector *intel_connector;
7088 struct drm_connector *connector;
7090 intel_connector = container_of(work, typeof(*intel_connector),
7091 modeset_retry_work);
7092 connector = &intel_connector->base;
7093 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
/* Grab the locks before changing the connector property */
7097 mutex_lock(&connector->dev->mode_config.mutex);
/* Set connector link status to BAD and send a uevent to notify
* userspace to do a modeset.
7101 drm_connector_set_link_status_property(connector,
7102 DRM_MODE_LINK_STATUS_BAD);
7103 mutex_unlock(&connector->dev->mode_config.mutex);
7104 /* Send Hotplug uevent so userspace can reprobe */
7105 drm_kms_helper_hotplug_event(connector->dev);
7109 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
7110 struct intel_connector *intel_connector)
7112 struct drm_connector *connector = &intel_connector->base;
7113 struct intel_dp *intel_dp = &intel_dig_port->dp;
7114 struct intel_encoder *intel_encoder = &intel_dig_port->base;
7115 struct drm_device *dev = intel_encoder->base.dev;
7116 struct drm_i915_private *dev_priv = to_i915(dev);
7117 enum port port = intel_encoder->port;
7118 enum phy phy = intel_port_to_phy(dev_priv, port);
7121 /* Initialize the work for modeset in case of link train failure */
7122 INIT_WORK(&intel_connector->modeset_retry_work,
7123 intel_dp_modeset_retry_work_fn);
7125 if (WARN(intel_dig_port->max_lanes < 1,
7126 "Not enough lanes (%d) for DP on port %c\n",
7127 intel_dig_port->max_lanes, port_name(port)))
7130 intel_dp_set_source_rates(intel_dp);
7132 intel_dp->reset_link_params = true;
7133 intel_dp->pps_pipe = INVALID_PIPE;
7134 intel_dp->active_pipe = INVALID_PIPE;
7136 /* Preserve the current hw state. */
7137 intel_dp->DP = I915_READ(intel_dp->output_reg);
7138 intel_dp->attached_connector = intel_connector;
7140 if (intel_dp_is_port_edp(dev_priv, port)) {
7142 * Currently we don't support eDP on TypeC ports, although in
7143 * theory it could work on TypeC legacy ports.
7145 WARN_ON(intel_phy_is_tc(dev_priv, phy));
7146 type = DRM_MODE_CONNECTOR_eDP;
7148 type = DRM_MODE_CONNECTOR_DisplayPort;
7151 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7152 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
7155 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
7156 * for DP the encoder type can be set by the caller to
7157 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
7159 if (type == DRM_MODE_CONNECTOR_eDP)
7160 intel_encoder->type = INTEL_OUTPUT_EDP;
7162 /* eDP only on port B and/or C on vlv/chv */
7163 if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7164 intel_dp_is_edp(intel_dp) &&
7165 port != PORT_B && port != PORT_C))
7168 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
7169 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
7172 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
7173 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
7175 if (!HAS_GMCH(dev_priv))
7176 connector->interlace_allowed = true;
7177 connector->doublescan_allowed = 0;
7179 if (INTEL_GEN(dev_priv) >= 11)
7180 connector->ycbcr_420_allowed = true;
7182 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
7184 intel_dp_aux_init(intel_dp);
7186 intel_connector_attach_encoder(intel_connector, intel_encoder);
7188 if (HAS_DDI(dev_priv))
7189 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
7191 intel_connector->get_hw_state = intel_connector_get_hw_state;
7193 /* init MST on ports that can support it */
7194 if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
7195 (port == PORT_B || port == PORT_C ||
7196 port == PORT_D || port == PORT_F))
7197 intel_dp_mst_encoder_init(intel_dig_port,
7198 intel_connector->base.base.id);
7200 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
7201 intel_dp_aux_fini(intel_dp);
7202 intel_dp_mst_encoder_cleanup(intel_dig_port);
7206 intel_dp_add_properties(intel_dp, connector);
7208 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
7209 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
7211 DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
/* For G4X desktop chips, PEG_BAND_GAP_DATA 3:0 must first be written
 * 0xd. Failure to do so will result in spurious interrupts being
 * generated on the port when a cable is not attached.
 */
7218 if (IS_G45(dev_priv)) {
7219 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
7220 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
7226 drm_connector_cleanup(connector);
7231 bool intel_dp_init(struct drm_i915_private *dev_priv,
7232 i915_reg_t output_reg,
7235 struct intel_digital_port *intel_dig_port;
7236 struct intel_encoder *intel_encoder;
7237 struct drm_encoder *encoder;
7238 struct intel_connector *intel_connector;
7240 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
7241 if (!intel_dig_port)
7244 intel_connector = intel_connector_alloc();
7245 if (!intel_connector)
7246 goto err_connector_alloc;
7248 intel_encoder = &intel_dig_port->base;
7249 encoder = &intel_encoder->base;
7251 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7252 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7253 "DP %c", port_name(port)))
7254 goto err_encoder_init;
7256 intel_encoder->hotplug = intel_dp_hotplug;
7257 intel_encoder->compute_config = intel_dp_compute_config;
7258 intel_encoder->get_hw_state = intel_dp_get_hw_state;
7259 intel_encoder->get_config = intel_dp_get_config;
7260 intel_encoder->update_pipe = intel_panel_update_backlight;
7261 intel_encoder->suspend = intel_dp_encoder_suspend;
7262 if (IS_CHERRYVIEW(dev_priv)) {
7263 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
7264 intel_encoder->pre_enable = chv_pre_enable_dp;
7265 intel_encoder->enable = vlv_enable_dp;
7266 intel_encoder->disable = vlv_disable_dp;
7267 intel_encoder->post_disable = chv_post_disable_dp;
7268 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
7269 } else if (IS_VALLEYVIEW(dev_priv)) {
7270 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
7271 intel_encoder->pre_enable = vlv_pre_enable_dp;
7272 intel_encoder->enable = vlv_enable_dp;
7273 intel_encoder->disable = vlv_disable_dp;
7274 intel_encoder->post_disable = vlv_post_disable_dp;
7276 intel_encoder->pre_enable = g4x_pre_enable_dp;
7277 intel_encoder->enable = g4x_enable_dp;
7278 intel_encoder->disable = g4x_disable_dp;
7279 intel_encoder->post_disable = g4x_post_disable_dp;
7282 intel_dig_port->dp.output_reg = output_reg;
7283 intel_dig_port->max_lanes = 4;
7285 intel_encoder->type = INTEL_OUTPUT_DP;
7286 intel_encoder->power_domain = intel_port_to_power_domain(port);
7287 if (IS_CHERRYVIEW(dev_priv)) {
7289 intel_encoder->crtc_mask = 1 << 2;
7291 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
7293 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
7295 intel_encoder->cloneable = 0;
7296 intel_encoder->port = port;
7298 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
7301 intel_infoframe_init(intel_dig_port);
7303 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
7304 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7305 goto err_init_connector;
7310 drm_encoder_cleanup(encoder);
7312 kfree(intel_connector);
7313 err_connector_alloc:
7314 kfree(intel_dig_port);
7318 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7320 struct intel_encoder *encoder;
7322 for_each_intel_encoder(&dev_priv->drm, encoder) {
7323 struct intel_dp *intel_dp;
7325 if (encoder->type != INTEL_OUTPUT_DDI)
7328 intel_dp = enc_to_intel_dp(&encoder->base);
7330 if (!intel_dp->can_mst)
7333 if (intel_dp->is_mst)
7334 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7338 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7340 struct intel_encoder *encoder;
7342 for_each_intel_encoder(&dev_priv->drm, encoder) {
7343 struct intel_dp *intel_dp;
7346 if (encoder->type != INTEL_OUTPUT_DDI)
7349 intel_dp = enc_to_intel_dp(&encoder->base);
7351 if (!intel_dp->can_mst)
7354 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
7356 intel_dp->is_mst = false;
7357 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,