/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
58 #include "gt/intel_rps.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
84 #include "intel_vga.h"
86 /* Primary plane formats for gen <= 3 */
87 static const u32 i8xx_primary_formats[] = {
94 /* Primary plane formats for ivb (no fp16 due to hw issue) */
95 static const u32 ivb_primary_formats[] = {
100 DRM_FORMAT_XRGB2101010,
101 DRM_FORMAT_XBGR2101010,
104 /* Primary plane formats for gen >= 4, except ivb */
105 static const u32 i965_primary_formats[] = {
110 DRM_FORMAT_XRGB2101010,
111 DRM_FORMAT_XBGR2101010,
112 DRM_FORMAT_XBGR16161616F,
115 /* Primary plane formats for vlv/chv */
116 static const u32 vlv_primary_formats[] = {
123 DRM_FORMAT_XRGB2101010,
124 DRM_FORMAT_XBGR2101010,
125 DRM_FORMAT_ARGB2101010,
126 DRM_FORMAT_ABGR2101010,
127 DRM_FORMAT_XBGR16161616F,
130 static const u64 i9xx_format_modifiers[] = {
131 I915_FORMAT_MOD_X_TILED,
132 DRM_FORMAT_MOD_LINEAR,
133 DRM_FORMAT_MOD_INVALID
137 static const u32 intel_cursor_formats[] = {
141 static const u64 cursor_format_modifiers[] = {
142 DRM_FORMAT_MOD_LINEAR,
143 DRM_FORMAT_MOD_INVALID
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
/*
 * Permitted min/max ranges for each divider of the DPLL clock equation,
 * plus the dot-clock threshold (dot_limit) used to choose between the
 * slow and fast p2 post dividers.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
187 /* returns HPLL frequency in kHz */
188 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
190 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
192 /* Obtain SKU information */
193 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
194 CCK_FUSE_HPLL_FREQ_MASK;
196 return vco_freq[hpll_freq] * 1000;
199 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
200 const char *name, u32 reg, int ref_freq)
205 val = vlv_cck_read(dev_priv, reg);
206 divider = val & CCK_FREQUENCY_VALUES;
208 WARN((val & CCK_FREQUENCY_STATUS) !=
209 (divider << CCK_FREQUENCY_STATUS_SHIFT),
210 "%s change in progress\n", name);
212 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
215 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
216 const char *name, u32 reg)
220 vlv_cck_get(dev_priv);
222 if (dev_priv->hpll_freq == 0)
223 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
225 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
227 vlv_cck_put(dev_priv);
232 static void intel_update_czclk(struct drm_i915_private *dev_priv)
234 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
237 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
238 CCK_CZ_CLOCK_CONTROL);
240 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
243 static inline u32 /* units of 100MHz */
244 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
245 const struct intel_crtc_state *pipe_config)
247 if (HAS_DDI(dev_priv))
248 return pipe_config->port_clock; /* SPLL */
250 return dev_priv->fdi_pll_freq;
253 static const struct intel_limit intel_limits_i8xx_dac = {
254 .dot = { .min = 25000, .max = 350000 },
255 .vco = { .min = 908000, .max = 1512000 },
256 .n = { .min = 2, .max = 16 },
257 .m = { .min = 96, .max = 140 },
258 .m1 = { .min = 18, .max = 26 },
259 .m2 = { .min = 6, .max = 16 },
260 .p = { .min = 4, .max = 128 },
261 .p1 = { .min = 2, .max = 33 },
262 .p2 = { .dot_limit = 165000,
263 .p2_slow = 4, .p2_fast = 2 },
266 static const struct intel_limit intel_limits_i8xx_dvo = {
267 .dot = { .min = 25000, .max = 350000 },
268 .vco = { .min = 908000, .max = 1512000 },
269 .n = { .min = 2, .max = 16 },
270 .m = { .min = 96, .max = 140 },
271 .m1 = { .min = 18, .max = 26 },
272 .m2 = { .min = 6, .max = 16 },
273 .p = { .min = 4, .max = 128 },
274 .p1 = { .min = 2, .max = 33 },
275 .p2 = { .dot_limit = 165000,
276 .p2_slow = 4, .p2_fast = 4 },
279 static const struct intel_limit intel_limits_i8xx_lvds = {
280 .dot = { .min = 25000, .max = 350000 },
281 .vco = { .min = 908000, .max = 1512000 },
282 .n = { .min = 2, .max = 16 },
283 .m = { .min = 96, .max = 140 },
284 .m1 = { .min = 18, .max = 26 },
285 .m2 = { .min = 6, .max = 16 },
286 .p = { .min = 4, .max = 128 },
287 .p1 = { .min = 1, .max = 6 },
288 .p2 = { .dot_limit = 165000,
289 .p2_slow = 14, .p2_fast = 7 },
292 static const struct intel_limit intel_limits_i9xx_sdvo = {
293 .dot = { .min = 20000, .max = 400000 },
294 .vco = { .min = 1400000, .max = 2800000 },
295 .n = { .min = 1, .max = 6 },
296 .m = { .min = 70, .max = 120 },
297 .m1 = { .min = 8, .max = 18 },
298 .m2 = { .min = 3, .max = 7 },
299 .p = { .min = 5, .max = 80 },
300 .p1 = { .min = 1, .max = 8 },
301 .p2 = { .dot_limit = 200000,
302 .p2_slow = 10, .p2_fast = 5 },
305 static const struct intel_limit intel_limits_i9xx_lvds = {
306 .dot = { .min = 20000, .max = 400000 },
307 .vco = { .min = 1400000, .max = 2800000 },
308 .n = { .min = 1, .max = 6 },
309 .m = { .min = 70, .max = 120 },
310 .m1 = { .min = 8, .max = 18 },
311 .m2 = { .min = 3, .max = 7 },
312 .p = { .min = 7, .max = 98 },
313 .p1 = { .min = 1, .max = 8 },
314 .p2 = { .dot_limit = 112000,
315 .p2_slow = 14, .p2_fast = 7 },
319 static const struct intel_limit intel_limits_g4x_sdvo = {
320 .dot = { .min = 25000, .max = 270000 },
321 .vco = { .min = 1750000, .max = 3500000},
322 .n = { .min = 1, .max = 4 },
323 .m = { .min = 104, .max = 138 },
324 .m1 = { .min = 17, .max = 23 },
325 .m2 = { .min = 5, .max = 11 },
326 .p = { .min = 10, .max = 30 },
327 .p1 = { .min = 1, .max = 3},
328 .p2 = { .dot_limit = 270000,
334 static const struct intel_limit intel_limits_g4x_hdmi = {
335 .dot = { .min = 22000, .max = 400000 },
336 .vco = { .min = 1750000, .max = 3500000},
337 .n = { .min = 1, .max = 4 },
338 .m = { .min = 104, .max = 138 },
339 .m1 = { .min = 16, .max = 23 },
340 .m2 = { .min = 5, .max = 11 },
341 .p = { .min = 5, .max = 80 },
342 .p1 = { .min = 1, .max = 8},
343 .p2 = { .dot_limit = 165000,
344 .p2_slow = 10, .p2_fast = 5 },
347 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
348 .dot = { .min = 20000, .max = 115000 },
349 .vco = { .min = 1750000, .max = 3500000 },
350 .n = { .min = 1, .max = 3 },
351 .m = { .min = 104, .max = 138 },
352 .m1 = { .min = 17, .max = 23 },
353 .m2 = { .min = 5, .max = 11 },
354 .p = { .min = 28, .max = 112 },
355 .p1 = { .min = 2, .max = 8 },
356 .p2 = { .dot_limit = 0,
357 .p2_slow = 14, .p2_fast = 14
361 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
362 .dot = { .min = 80000, .max = 224000 },
363 .vco = { .min = 1750000, .max = 3500000 },
364 .n = { .min = 1, .max = 3 },
365 .m = { .min = 104, .max = 138 },
366 .m1 = { .min = 17, .max = 23 },
367 .m2 = { .min = 5, .max = 11 },
368 .p = { .min = 14, .max = 42 },
369 .p1 = { .min = 2, .max = 6 },
370 .p2 = { .dot_limit = 0,
371 .p2_slow = 7, .p2_fast = 7
375 static const struct intel_limit intel_limits_pineview_sdvo = {
376 .dot = { .min = 20000, .max = 400000},
377 .vco = { .min = 1700000, .max = 3500000 },
378 /* Pineview's Ncounter is a ring counter */
379 .n = { .min = 3, .max = 6 },
380 .m = { .min = 2, .max = 256 },
381 /* Pineview only has one combined m divider, which we treat as m2. */
382 .m1 = { .min = 0, .max = 0 },
383 .m2 = { .min = 0, .max = 254 },
384 .p = { .min = 5, .max = 80 },
385 .p1 = { .min = 1, .max = 8 },
386 .p2 = { .dot_limit = 200000,
387 .p2_slow = 10, .p2_fast = 5 },
390 static const struct intel_limit intel_limits_pineview_lvds = {
391 .dot = { .min = 20000, .max = 400000 },
392 .vco = { .min = 1700000, .max = 3500000 },
393 .n = { .min = 3, .max = 6 },
394 .m = { .min = 2, .max = 256 },
395 .m1 = { .min = 0, .max = 0 },
396 .m2 = { .min = 0, .max = 254 },
397 .p = { .min = 7, .max = 112 },
398 .p1 = { .min = 1, .max = 8 },
399 .p2 = { .dot_limit = 112000,
400 .p2_slow = 14, .p2_fast = 14 },
403 /* Ironlake / Sandybridge
405 * We calculate clock using (register_value + 2) for N/M1/M2, so here
406 * the range value for them is (actual_value - 2).
408 static const struct intel_limit intel_limits_ironlake_dac = {
409 .dot = { .min = 25000, .max = 350000 },
410 .vco = { .min = 1760000, .max = 3510000 },
411 .n = { .min = 1, .max = 5 },
412 .m = { .min = 79, .max = 127 },
413 .m1 = { .min = 12, .max = 22 },
414 .m2 = { .min = 5, .max = 9 },
415 .p = { .min = 5, .max = 80 },
416 .p1 = { .min = 1, .max = 8 },
417 .p2 = { .dot_limit = 225000,
418 .p2_slow = 10, .p2_fast = 5 },
421 static const struct intel_limit intel_limits_ironlake_single_lvds = {
422 .dot = { .min = 25000, .max = 350000 },
423 .vco = { .min = 1760000, .max = 3510000 },
424 .n = { .min = 1, .max = 3 },
425 .m = { .min = 79, .max = 118 },
426 .m1 = { .min = 12, .max = 22 },
427 .m2 = { .min = 5, .max = 9 },
428 .p = { .min = 28, .max = 112 },
429 .p1 = { .min = 2, .max = 8 },
430 .p2 = { .dot_limit = 225000,
431 .p2_slow = 14, .p2_fast = 14 },
434 static const struct intel_limit intel_limits_ironlake_dual_lvds = {
435 .dot = { .min = 25000, .max = 350000 },
436 .vco = { .min = 1760000, .max = 3510000 },
437 .n = { .min = 1, .max = 3 },
438 .m = { .min = 79, .max = 127 },
439 .m1 = { .min = 12, .max = 22 },
440 .m2 = { .min = 5, .max = 9 },
441 .p = { .min = 14, .max = 56 },
442 .p1 = { .min = 2, .max = 8 },
443 .p2 = { .dot_limit = 225000,
444 .p2_slow = 7, .p2_fast = 7 },
447 /* LVDS 100mhz refclk limits. */
448 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
449 .dot = { .min = 25000, .max = 350000 },
450 .vco = { .min = 1760000, .max = 3510000 },
451 .n = { .min = 1, .max = 2 },
452 .m = { .min = 79, .max = 126 },
453 .m1 = { .min = 12, .max = 22 },
454 .m2 = { .min = 5, .max = 9 },
455 .p = { .min = 28, .max = 112 },
456 .p1 = { .min = 2, .max = 8 },
457 .p2 = { .dot_limit = 225000,
458 .p2_slow = 14, .p2_fast = 14 },
461 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
462 .dot = { .min = 25000, .max = 350000 },
463 .vco = { .min = 1760000, .max = 3510000 },
464 .n = { .min = 1, .max = 3 },
465 .m = { .min = 79, .max = 126 },
466 .m1 = { .min = 12, .max = 22 },
467 .m2 = { .min = 5, .max = 9 },
468 .p = { .min = 14, .max = 42 },
469 .p1 = { .min = 2, .max = 6 },
470 .p2 = { .dot_limit = 225000,
471 .p2_slow = 7, .p2_fast = 7 },
474 static const struct intel_limit intel_limits_vlv = {
476 * These are the data rate limits (measured in fast clocks)
477 * since those are the strictest limits we have. The fast
478 * clock and actual rate limits are more relaxed, so checking
479 * them would make no difference.
481 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
482 .vco = { .min = 4000000, .max = 6000000 },
483 .n = { .min = 1, .max = 7 },
484 .m1 = { .min = 2, .max = 3 },
485 .m2 = { .min = 11, .max = 156 },
486 .p1 = { .min = 2, .max = 3 },
487 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
490 static const struct intel_limit intel_limits_chv = {
492 * These are the data rate limits (measured in fast clocks)
493 * since those are the strictest limits we have. The fast
494 * clock and actual rate limits are more relaxed, so checking
495 * them would make no difference.
497 .dot = { .min = 25000 * 5, .max = 540000 * 5},
498 .vco = { .min = 4800000, .max = 6480000 },
499 .n = { .min = 1, .max = 1 },
500 .m1 = { .min = 2, .max = 2 },
501 .m2 = { .min = 24 << 22, .max = 175 << 22 },
502 .p1 = { .min = 2, .max = 4 },
503 .p2 = { .p2_slow = 1, .p2_fast = 14 },
506 static const struct intel_limit intel_limits_bxt = {
507 /* FIXME: find real dot limits */
508 .dot = { .min = 0, .max = INT_MAX },
509 .vco = { .min = 4800000, .max = 6700000 },
510 .n = { .min = 1, .max = 1 },
511 .m1 = { .min = 2, .max = 2 },
512 /* FIXME: find real m2 limits */
513 .m2 = { .min = 2 << 22, .max = 255 << 22 },
514 .p1 = { .min = 2, .max = 4 },
515 .p2 = { .p2_slow = 1, .p2_fast = 20 },
518 /* WA Display #0827: Gen9:all */
520 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
523 I915_WRITE(CLKGATE_DIS_PSL(pipe),
524 I915_READ(CLKGATE_DIS_PSL(pipe)) |
525 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
527 I915_WRITE(CLKGATE_DIS_PSL(pipe),
528 I915_READ(CLKGATE_DIS_PSL(pipe)) &
529 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
532 /* Wa_2006604312:icl */
534 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
538 I915_WRITE(CLKGATE_DIS_PSL(pipe),
539 I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
541 I915_WRITE(CLKGATE_DIS_PSL(pipe),
542 I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
546 needs_modeset(const struct intel_crtc_state *state)
548 return drm_atomic_crtc_needs_modeset(&state->uapi);
552 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
554 return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
555 crtc_state->sync_mode_slaves_mask);
559 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
561 return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
562 crtc_state->sync_mode_slaves_mask);
566 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
567 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
568 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
569 * The helpers' return value is the rate of the clock that is fed to the
570 * display engine's pipe which can be the above fast dot clock rate or a
571 * divided-down version of it.
573 /* m1 is reserved as 0 in Pineview, n is a ring counter */
574 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
576 clock->m = clock->m2 + 2;
577 clock->p = clock->p1 * clock->p2;
578 if (WARN_ON(clock->n == 0 || clock->p == 0))
580 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
581 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
586 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
588 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
591 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
593 clock->m = i9xx_dpll_compute_m(clock);
594 clock->p = clock->p1 * clock->p2;
595 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
597 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
598 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
603 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
605 clock->m = clock->m1 * clock->m2;
606 clock->p = clock->p1 * clock->p2;
607 if (WARN_ON(clock->n == 0 || clock->p == 0))
609 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
610 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
612 return clock->dot / 5;
615 int chv_calc_dpll_params(int refclk, struct dpll *clock)
617 clock->m = clock->m1 * clock->m2;
618 clock->p = clock->p1 * clock->p2;
619 if (WARN_ON(clock->n == 0 || clock->p == 0))
621 clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
623 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
625 return clock->dot / 5;
628 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
631 * Returns whether the given set of divisors are valid for a given refclk with
632 * the given connectors.
634 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
635 const struct intel_limit *limit,
636 const struct dpll *clock)
638 if (clock->n < limit->n.min || limit->n.max < clock->n)
639 INTELPllInvalid("n out of range\n");
640 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
641 INTELPllInvalid("p1 out of range\n");
642 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
643 INTELPllInvalid("m2 out of range\n");
644 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
645 INTELPllInvalid("m1 out of range\n");
647 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
648 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
649 if (clock->m1 <= clock->m2)
650 INTELPllInvalid("m1 <= m2\n");
652 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
653 !IS_GEN9_LP(dev_priv)) {
654 if (clock->p < limit->p.min || limit->p.max < clock->p)
655 INTELPllInvalid("p out of range\n");
656 if (clock->m < limit->m.min || limit->m.max < clock->m)
657 INTELPllInvalid("m out of range\n");
660 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
661 INTELPllInvalid("vco out of range\n");
662 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
663 * connector, etc., rather than just a single range.
665 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
666 INTELPllInvalid("dot out of range\n");
672 i9xx_select_p2_div(const struct intel_limit *limit,
673 const struct intel_crtc_state *crtc_state,
676 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
678 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
680 * For LVDS just rely on its current settings for dual-channel.
681 * We haven't figured out how to reliably set up different
682 * single/dual channel state, if we even can.
684 if (intel_is_dual_link_lvds(dev_priv))
685 return limit->p2.p2_fast;
687 return limit->p2.p2_slow;
689 if (target < limit->p2.dot_limit)
690 return limit->p2.p2_slow;
692 return limit->p2.p2_fast;
697 * Returns a set of divisors for the desired target clock with the given
698 * refclk, or FALSE. The returned values represent the clock equation:
699 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
701 * Target and reference clocks are specified in kHz.
703 * If match_clock is provided, then best_clock P divider must match the P
704 * divider from @match_clock used for LVDS downclocking.
707 i9xx_find_best_dpll(const struct intel_limit *limit,
708 struct intel_crtc_state *crtc_state,
709 int target, int refclk, struct dpll *match_clock,
710 struct dpll *best_clock)
712 struct drm_device *dev = crtc_state->uapi.crtc->dev;
716 memset(best_clock, 0, sizeof(*best_clock));
718 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
720 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
722 for (clock.m2 = limit->m2.min;
723 clock.m2 <= limit->m2.max; clock.m2++) {
724 if (clock.m2 >= clock.m1)
726 for (clock.n = limit->n.min;
727 clock.n <= limit->n.max; clock.n++) {
728 for (clock.p1 = limit->p1.min;
729 clock.p1 <= limit->p1.max; clock.p1++) {
732 i9xx_calc_dpll_params(refclk, &clock);
733 if (!intel_PLL_is_valid(to_i915(dev),
738 clock.p != match_clock->p)
741 this_err = abs(clock.dot - target);
742 if (this_err < err) {
751 return (err != target);
755 * Returns a set of divisors for the desired target clock with the given
756 * refclk, or FALSE. The returned values represent the clock equation:
757 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
759 * Target and reference clocks are specified in kHz.
761 * If match_clock is provided, then best_clock P divider must match the P
762 * divider from @match_clock used for LVDS downclocking.
765 pnv_find_best_dpll(const struct intel_limit *limit,
766 struct intel_crtc_state *crtc_state,
767 int target, int refclk, struct dpll *match_clock,
768 struct dpll *best_clock)
770 struct drm_device *dev = crtc_state->uapi.crtc->dev;
774 memset(best_clock, 0, sizeof(*best_clock));
776 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
778 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
780 for (clock.m2 = limit->m2.min;
781 clock.m2 <= limit->m2.max; clock.m2++) {
782 for (clock.n = limit->n.min;
783 clock.n <= limit->n.max; clock.n++) {
784 for (clock.p1 = limit->p1.min;
785 clock.p1 <= limit->p1.max; clock.p1++) {
788 pnv_calc_dpll_params(refclk, &clock);
789 if (!intel_PLL_is_valid(to_i915(dev),
794 clock.p != match_clock->p)
797 this_err = abs(clock.dot - target);
798 if (this_err < err) {
807 return (err != target);
811 * Returns a set of divisors for the desired target clock with the given
812 * refclk, or FALSE. The returned values represent the clock equation:
813 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
815 * Target and reference clocks are specified in kHz.
817 * If match_clock is provided, then best_clock P divider must match the P
818 * divider from @match_clock used for LVDS downclocking.
821 g4x_find_best_dpll(const struct intel_limit *limit,
822 struct intel_crtc_state *crtc_state,
823 int target, int refclk, struct dpll *match_clock,
824 struct dpll *best_clock)
826 struct drm_device *dev = crtc_state->uapi.crtc->dev;
830 /* approximately equals target * 0.00585 */
831 int err_most = (target >> 8) + (target >> 9);
833 memset(best_clock, 0, sizeof(*best_clock));
835 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
837 max_n = limit->n.max;
838 /* based on hardware requirement, prefer smaller n to precision */
839 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
840 /* based on hardware requirement, prefere larger m1,m2 */
841 for (clock.m1 = limit->m1.max;
842 clock.m1 >= limit->m1.min; clock.m1--) {
843 for (clock.m2 = limit->m2.max;
844 clock.m2 >= limit->m2.min; clock.m2--) {
845 for (clock.p1 = limit->p1.max;
846 clock.p1 >= limit->p1.min; clock.p1--) {
849 i9xx_calc_dpll_params(refclk, &clock);
850 if (!intel_PLL_is_valid(to_i915(dev),
855 this_err = abs(clock.dot - target);
856 if (this_err < err_most) {
870 * Check if the calculated PLL configuration is more optimal compared to the
871 * best configuration and error found so far. Return the calculated error.
873 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
874 const struct dpll *calculated_clock,
875 const struct dpll *best_clock,
876 unsigned int best_error_ppm,
877 unsigned int *error_ppm)
880 * For CHV ignore the error and consider only the P value.
881 * Prefer a bigger P value based on HW requirements.
883 if (IS_CHERRYVIEW(to_i915(dev))) {
886 return calculated_clock->p > best_clock->p;
889 if (WARN_ON_ONCE(!target_freq))
892 *error_ppm = div_u64(1000000ULL *
893 abs(target_freq - calculated_clock->dot),
896 * Prefer a better P value over a better (smaller) error if the error
897 * is small. Ensure this preference for future configurations too by
898 * setting the error to 0.
900 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
906 return *error_ppm + 10 < best_error_ppm;
910 * Returns a set of divisors for the desired target clock with the given
911 * refclk, or FALSE. The returned values represent the clock equation:
912 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
915 vlv_find_best_dpll(const struct intel_limit *limit,
916 struct intel_crtc_state *crtc_state,
917 int target, int refclk, struct dpll *match_clock,
918 struct dpll *best_clock)
920 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
921 struct drm_device *dev = crtc->base.dev;
923 unsigned int bestppm = 1000000;
924 /* min update 19.2 MHz */
925 int max_n = min(limit->n.max, refclk / 19200);
928 target *= 5; /* fast clock */
930 memset(best_clock, 0, sizeof(*best_clock));
932 /* based on hardware requirement, prefer smaller n to precision */
933 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
934 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
935 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
936 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
937 clock.p = clock.p1 * clock.p2;
938 /* based on hardware requirement, prefer bigger m1,m2 values */
939 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
942 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
945 vlv_calc_dpll_params(refclk, &clock);
947 if (!intel_PLL_is_valid(to_i915(dev),
952 if (!vlv_PLL_is_optimal(dev, target,
970 * Returns a set of divisors for the desired target clock with the given
971 * refclk, or FALSE. The returned values represent the clock equation:
972 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
975 chv_find_best_dpll(const struct intel_limit *limit,
976 struct intel_crtc_state *crtc_state,
977 int target, int refclk, struct dpll *match_clock,
978 struct dpll *best_clock)
980 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
981 struct drm_device *dev = crtc->base.dev;
982 unsigned int best_error_ppm;
987 memset(best_clock, 0, sizeof(*best_clock));
988 best_error_ppm = 1000000;
991 * Based on hardware doc, the n always set to 1, and m1 always
992 * set to 2. If requires to support 200Mhz refclk, we need to
993 * revisit this because n may not 1 anymore.
995 clock.n = 1, clock.m1 = 2;
996 target *= 5; /* fast clock */
998 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
999 for (clock.p2 = limit->p2.p2_fast;
1000 clock.p2 >= limit->p2.p2_slow;
1001 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1002 unsigned int error_ppm;
1004 clock.p = clock.p1 * clock.p2;
1006 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1009 if (m2 > INT_MAX/clock.m1)
1014 chv_calc_dpll_params(refclk, &clock);
1016 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1019 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1020 best_error_ppm, &error_ppm))
1023 *best_clock = clock;
1024 best_error_ppm = error_ppm;
1032 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1033 struct dpll *best_clock)
1035 int refclk = 100000;
1036 const struct intel_limit *limit = &intel_limits_bxt;
1038 return chv_find_best_dpll(limit, crtc_state,
1039 crtc_state->port_clock, refclk,
1043 bool intel_crtc_active(struct intel_crtc *crtc)
1045 /* Be paranoid as we can arrive here with only partial
1046 * state retrieved from the hardware during setup.
1048 * We can ditch the adjusted_mode.crtc_clock check as soon
1049 * as Haswell has gained clock readout/fastboot support.
1051 * We can ditch the crtc->primary->state->fb check as soon as we can
1052 * properly reconstruct framebuffers.
1054 * FIXME: The intel_crtc->active here should be switched to
1055 * crtc->state->active once we have proper CRTC states wired up
1058 return crtc->active && crtc->base.primary->state->fb &&
1059 crtc->config->hw.adjusted_mode.crtc_clock;
1062 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1065 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1067 return crtc->config->cpu_transcoder;
1070 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1073 i915_reg_t reg = PIPEDSL(pipe);
1077 if (IS_GEN(dev_priv, 2))
1078 line_mask = DSL_LINEMASK_GEN2;
1080 line_mask = DSL_LINEMASK_GEN3;
1082 line1 = I915_READ(reg) & line_mask;
1084 line2 = I915_READ(reg) & line_mask;
1086 return line1 != line2;
1089 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1091 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1092 enum pipe pipe = crtc->pipe;
1094 /* Wait for the display line to settle/start moving */
1095 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1096 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1097 pipe_name(pipe), onoff(state));
1100 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1102 wait_for_pipe_scanline_moving(crtc, false);
1105 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1107 wait_for_pipe_scanline_moving(crtc, true);
1111 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
1113 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1114 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1116 if (INTEL_GEN(dev_priv) >= 4) {
1117 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1118 i915_reg_t reg = PIPECONF(cpu_transcoder);
1120 /* Wait for the Pipe State to go off */
1121 if (intel_de_wait_for_clear(dev_priv, reg,
1122 I965_PIPECONF_ACTIVE, 100))
1123 WARN(1, "pipe_off wait timed out\n");
1125 intel_wait_for_pipe_scanline_stopped(crtc);
1129 /* Only for pre-ILK configs */
1130 void assert_pll(struct drm_i915_private *dev_priv,
1131 enum pipe pipe, bool state)
1136 val = I915_READ(DPLL(pipe));
1137 cur_state = !!(val & DPLL_VCO_ENABLE);
1138 I915_STATE_WARN(cur_state != state,
1139 "PLL state assertion failure (expected %s, current %s)\n",
1140 onoff(state), onoff(cur_state));
1143 /* XXX: the dsi pll is shared between MIPI DSI ports */
1144 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1149 vlv_cck_get(dev_priv);
1150 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1151 vlv_cck_put(dev_priv);
1153 cur_state = val & DSI_PLL_VCO_EN;
1154 I915_STATE_WARN(cur_state != state,
1155 "DSI PLL state assertion failure (expected %s, current %s)\n",
1156 onoff(state), onoff(cur_state));
1159 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1160 enum pipe pipe, bool state)
1163 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1166 if (HAS_DDI(dev_priv)) {
1167 /* DDI does not have a specific FDI_TX register */
1168 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1169 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1171 u32 val = I915_READ(FDI_TX_CTL(pipe));
1172 cur_state = !!(val & FDI_TX_ENABLE);
1174 I915_STATE_WARN(cur_state != state,
1175 "FDI TX state assertion failure (expected %s, current %s)\n",
1176 onoff(state), onoff(cur_state));
1178 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1179 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1181 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1182 enum pipe pipe, bool state)
1187 val = I915_READ(FDI_RX_CTL(pipe));
1188 cur_state = !!(val & FDI_RX_ENABLE);
1189 I915_STATE_WARN(cur_state != state,
1190 "FDI RX state assertion failure (expected %s, current %s)\n",
1191 onoff(state), onoff(cur_state));
1193 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1194 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1196 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1201 /* ILK FDI PLL is always enabled */
1202 if (IS_GEN(dev_priv, 5))
1205 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1206 if (HAS_DDI(dev_priv))
1209 val = I915_READ(FDI_TX_CTL(pipe));
1210 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1213 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1214 enum pipe pipe, bool state)
1219 val = I915_READ(FDI_RX_CTL(pipe));
1220 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1221 I915_STATE_WARN(cur_state != state,
1222 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1223 onoff(state), onoff(cur_state));
1226 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1230 enum pipe panel_pipe = INVALID_PIPE;
1233 if (WARN_ON(HAS_DDI(dev_priv)))
1236 if (HAS_PCH_SPLIT(dev_priv)) {
1239 pp_reg = PP_CONTROL(0);
1240 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1243 case PANEL_PORT_SELECT_LVDS:
1244 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1246 case PANEL_PORT_SELECT_DPA:
1247 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1249 case PANEL_PORT_SELECT_DPC:
1250 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1252 case PANEL_PORT_SELECT_DPD:
1253 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1256 MISSING_CASE(port_sel);
1259 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1260 /* presumably write lock depends on pipe, not port select */
1261 pp_reg = PP_CONTROL(pipe);
1266 pp_reg = PP_CONTROL(0);
1267 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1269 WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
1270 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1273 val = I915_READ(pp_reg);
1274 if (!(val & PANEL_POWER_ON) ||
1275 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1278 I915_STATE_WARN(panel_pipe == pipe && locked,
1279 "panel assertion failure, pipe %c regs locked\n",
1283 void assert_pipe(struct drm_i915_private *dev_priv,
1284 enum pipe pipe, bool state)
1287 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1289 enum intel_display_power_domain power_domain;
1290 intel_wakeref_t wakeref;
1292 /* we keep both pipes enabled on 830 */
1293 if (IS_I830(dev_priv))
1296 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1297 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
1299 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1300 cur_state = !!(val & PIPECONF_ENABLE);
1302 intel_display_power_put(dev_priv, power_domain, wakeref);
1307 I915_STATE_WARN(cur_state != state,
1308 "pipe %c assertion failure (expected %s, current %s)\n",
1309 pipe_name(pipe), onoff(state), onoff(cur_state));
1312 static void assert_plane(struct intel_plane *plane, bool state)
1317 cur_state = plane->get_hw_state(plane, &pipe);
1319 I915_STATE_WARN(cur_state != state,
1320 "%s assertion failure (expected %s, current %s)\n",
1321 plane->base.name, onoff(state), onoff(cur_state));
1324 #define assert_plane_enabled(p) assert_plane(p, true)
1325 #define assert_plane_disabled(p) assert_plane(p, false)
1327 static void assert_planes_disabled(struct intel_crtc *crtc)
1329 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1330 struct intel_plane *plane;
1332 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1333 assert_plane_disabled(plane);
/*
 * Warn if vblank interrupts are still enabled on @crtc. A successful
 * drm_crtc_vblank_get() (returns 0) means vblanks were on; drop the
 * reference we just took so the warning has no side effect.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1342 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1348 val = I915_READ(PCH_TRANSCONF(pipe));
1349 enabled = !!(val & TRANS_ENABLE);
1350 I915_STATE_WARN(enabled,
1351 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1355 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1356 enum pipe pipe, enum port port,
1359 enum pipe port_pipe;
1362 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1364 I915_STATE_WARN(state && port_pipe == pipe,
1365 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1366 port_name(port), pipe_name(pipe));
1368 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1369 "IBX PCH DP %c still using transcoder B\n",
1373 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1374 enum pipe pipe, enum port port,
1375 i915_reg_t hdmi_reg)
1377 enum pipe port_pipe;
1380 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1382 I915_STATE_WARN(state && port_pipe == pipe,
1383 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1384 port_name(port), pipe_name(pipe));
1386 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1387 "IBX PCH HDMI %c still using transcoder B\n",
1391 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1394 enum pipe port_pipe;
1396 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1397 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1398 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
1400 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1402 "PCH VGA enabled on transcoder %c, should be disabled\n",
1405 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1407 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1410 /* PCH SDVOB multiplex with HDMIB */
1411 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1412 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1413 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
1416 static void _vlv_enable_pll(struct intel_crtc *crtc,
1417 const struct intel_crtc_state *pipe_config)
1419 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1420 enum pipe pipe = crtc->pipe;
1422 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1423 POSTING_READ(DPLL(pipe));
1426 if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1427 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1430 static void vlv_enable_pll(struct intel_crtc *crtc,
1431 const struct intel_crtc_state *pipe_config)
1433 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1434 enum pipe pipe = crtc->pipe;
1436 assert_pipe_disabled(dev_priv, pipe);
1438 /* PLL is protected by panel, make sure we can write it */
1439 assert_panel_unlocked(dev_priv, pipe);
1441 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1442 _vlv_enable_pll(crtc, pipe_config);
1444 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1445 POSTING_READ(DPLL_MD(pipe));
1449 static void _chv_enable_pll(struct intel_crtc *crtc,
1450 const struct intel_crtc_state *pipe_config)
1452 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1453 enum pipe pipe = crtc->pipe;
1454 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1457 vlv_dpio_get(dev_priv);
1459 /* Enable back the 10bit clock to display controller */
1460 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1461 tmp |= DPIO_DCLKP_EN;
1462 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1464 vlv_dpio_put(dev_priv);
1467 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1472 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1474 /* Check PLL is locked */
1475 if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1476 DRM_ERROR("PLL %d failed to lock\n", pipe);
1479 static void chv_enable_pll(struct intel_crtc *crtc,
1480 const struct intel_crtc_state *pipe_config)
1482 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1483 enum pipe pipe = crtc->pipe;
1485 assert_pipe_disabled(dev_priv, pipe);
1487 /* PLL is protected by panel, make sure we can write it */
1488 assert_panel_unlocked(dev_priv, pipe);
1490 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1491 _chv_enable_pll(crtc, pipe_config);
1493 if (pipe != PIPE_A) {
1495 * WaPixelRepeatModeFixForC0:chv
1497 * DPLLCMD is AWOL. Use chicken bits to propagate
1498 * the value from DPLLBMD to either pipe B or C.
1500 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1501 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1502 I915_WRITE(CBR4_VLV, 0);
1503 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1506 * DPLLB VGA mode also seems to cause problems.
1507 * We should always have it disabled.
1509 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1511 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1512 POSTING_READ(DPLL_MD(pipe));
1516 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1518 if (IS_I830(dev_priv))
1521 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1524 static void i9xx_enable_pll(struct intel_crtc *crtc,
1525 const struct intel_crtc_state *crtc_state)
1527 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1528 i915_reg_t reg = DPLL(crtc->pipe);
1529 u32 dpll = crtc_state->dpll_hw_state.dpll;
1532 assert_pipe_disabled(dev_priv, crtc->pipe);
1534 /* PLL is protected by panel, make sure we can write it */
1535 if (i9xx_has_pps(dev_priv))
1536 assert_panel_unlocked(dev_priv, crtc->pipe);
1539 * Apparently we need to have VGA mode enabled prior to changing
1540 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1541 * dividers, even though the register value does change.
1543 I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
1544 I915_WRITE(reg, dpll);
1546 /* Wait for the clocks to stabilize. */
1550 if (INTEL_GEN(dev_priv) >= 4) {
1551 I915_WRITE(DPLL_MD(crtc->pipe),
1552 crtc_state->dpll_hw_state.dpll_md);
1554 /* The pixel multiplier can only be updated once the
1555 * DPLL is enabled and the clocks are stable.
1557 * So write it again.
1559 I915_WRITE(reg, dpll);
1562 /* We do this three times for luck */
1563 for (i = 0; i < 3; i++) {
1564 I915_WRITE(reg, dpll);
1566 udelay(150); /* wait for warmup */
1570 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1572 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1573 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1574 enum pipe pipe = crtc->pipe;
1576 /* Don't disable pipe or pipe PLLs if needed */
1577 if (IS_I830(dev_priv))
1580 /* Make sure the pipe isn't still relying on us */
1581 assert_pipe_disabled(dev_priv, pipe);
1583 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1584 POSTING_READ(DPLL(pipe));
1587 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1591 /* Make sure the pipe isn't still relying on us */
1592 assert_pipe_disabled(dev_priv, pipe);
1594 val = DPLL_INTEGRATED_REF_CLK_VLV |
1595 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1597 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1599 I915_WRITE(DPLL(pipe), val);
1600 POSTING_READ(DPLL(pipe));
1603 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1605 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1608 /* Make sure the pipe isn't still relying on us */
1609 assert_pipe_disabled(dev_priv, pipe);
1611 val = DPLL_SSC_REF_CLK_CHV |
1612 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1614 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1616 I915_WRITE(DPLL(pipe), val);
1617 POSTING_READ(DPLL(pipe));
1619 vlv_dpio_get(dev_priv);
1621 /* Disable 10bit clock to display controller */
1622 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1623 val &= ~DPIO_DCLKP_EN;
1624 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1626 vlv_dpio_put(dev_priv);
1629 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1630 struct intel_digital_port *dport,
1631 unsigned int expected_mask)
1634 i915_reg_t dpll_reg;
1636 switch (dport->base.port) {
1638 port_mask = DPLL_PORTB_READY_MASK;
1642 port_mask = DPLL_PORTC_READY_MASK;
1644 expected_mask <<= 4;
1647 port_mask = DPLL_PORTD_READY_MASK;
1648 dpll_reg = DPIO_PHY_STATUS;
1654 if (intel_de_wait_for_register(dev_priv, dpll_reg,
1655 port_mask, expected_mask, 1000))
1656 WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
1657 dport->base.base.base.id, dport->base.base.name,
1658 I915_READ(dpll_reg) & port_mask, expected_mask);
1661 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1663 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1664 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1665 enum pipe pipe = crtc->pipe;
1667 u32 val, pipeconf_val;
1669 /* Make sure PCH DPLL is enabled */
1670 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1672 /* FDI must be feeding us bits for PCH ports */
1673 assert_fdi_tx_enabled(dev_priv, pipe);
1674 assert_fdi_rx_enabled(dev_priv, pipe);
1676 if (HAS_PCH_CPT(dev_priv)) {
1677 reg = TRANS_CHICKEN2(pipe);
1678 val = I915_READ(reg);
1680 * Workaround: Set the timing override bit
1681 * before enabling the pch transcoder.
1683 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1684 /* Configure frame start delay to match the CPU */
1685 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
1686 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
1687 I915_WRITE(reg, val);
1690 reg = PCH_TRANSCONF(pipe);
1691 val = I915_READ(reg);
1692 pipeconf_val = I915_READ(PIPECONF(pipe));
1694 if (HAS_PCH_IBX(dev_priv)) {
1695 /* Configure frame start delay to match the CPU */
1696 val &= ~TRANS_FRAME_START_DELAY_MASK;
1697 val |= TRANS_FRAME_START_DELAY(0);
1700 * Make the BPC in transcoder be consistent with
1701 * that in pipeconf reg. For HDMI we must use 8bpc
1702 * here for both 8bpc and 12bpc.
1704 val &= ~PIPECONF_BPC_MASK;
1705 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1706 val |= PIPECONF_8BPC;
1708 val |= pipeconf_val & PIPECONF_BPC_MASK;
1711 val &= ~TRANS_INTERLACE_MASK;
1712 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1713 if (HAS_PCH_IBX(dev_priv) &&
1714 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1715 val |= TRANS_LEGACY_INTERLACED_ILK;
1717 val |= TRANS_INTERLACED;
1719 val |= TRANS_PROGRESSIVE;
1722 I915_WRITE(reg, val | TRANS_ENABLE);
1723 if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
1724 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1727 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1728 enum transcoder cpu_transcoder)
1730 u32 val, pipeconf_val;
1732 /* FDI must be feeding us bits for PCH ports */
1733 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1734 assert_fdi_rx_enabled(dev_priv, PIPE_A);
1736 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1737 /* Workaround: set timing override bit. */
1738 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1739 /* Configure frame start delay to match the CPU */
1740 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
1741 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
1742 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1745 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1747 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1748 PIPECONF_INTERLACED_ILK)
1749 val |= TRANS_INTERLACED;
1751 val |= TRANS_PROGRESSIVE;
1753 I915_WRITE(LPT_TRANSCONF, val);
1754 if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
1755 TRANS_STATE_ENABLE, 100))
1756 DRM_ERROR("Failed to enable PCH transcoder\n");
1759 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1765 /* FDI relies on the transcoder */
1766 assert_fdi_tx_disabled(dev_priv, pipe);
1767 assert_fdi_rx_disabled(dev_priv, pipe);
1769 /* Ports must be off as well */
1770 assert_pch_ports_disabled(dev_priv, pipe);
1772 reg = PCH_TRANSCONF(pipe);
1773 val = I915_READ(reg);
1774 val &= ~TRANS_ENABLE;
1775 I915_WRITE(reg, val);
1776 /* wait for PCH transcoder off, transcoder state */
1777 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
1778 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1780 if (HAS_PCH_CPT(dev_priv)) {
1781 /* Workaround: Clear the timing override chicken bit again. */
1782 reg = TRANS_CHICKEN2(pipe);
1783 val = I915_READ(reg);
1784 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1785 I915_WRITE(reg, val);
1789 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1793 val = I915_READ(LPT_TRANSCONF);
1794 val &= ~TRANS_ENABLE;
1795 I915_WRITE(LPT_TRANSCONF, val);
1796 /* wait for PCH transcoder off, transcoder state */
1797 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
1798 TRANS_STATE_ENABLE, 50))
1799 DRM_ERROR("Failed to disable PCH transcoder\n");
1801 /* Workaround: clear timing override bit. */
1802 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1803 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1804 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1807 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1809 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1811 if (HAS_PCH_LPT(dev_priv))
1817 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1819 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1822 * On i965gm the hardware frame counter reads
1823 * zero when the TV encoder is enabled :(
1825 if (IS_I965GM(dev_priv) &&
1826 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1829 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1830 return 0xffffffff; /* full 32 bit counter */
1831 else if (INTEL_GEN(dev_priv) >= 3)
1832 return 0xffffff; /* only 24 bits of frame count */
1834 return 0; /* Gen2 doesn't have a hardware frame counter */
1837 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1839 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1841 drm_crtc_set_max_vblank_count(&crtc->base,
1842 intel_crtc_max_vblank_count(crtc_state));
1843 drm_crtc_vblank_on(&crtc->base);
1846 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1848 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1849 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1850 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1851 enum pipe pipe = crtc->pipe;
1855 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1857 assert_planes_disabled(crtc);
1860 * A pipe without a PLL won't actually be able to drive bits from
1861 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1864 if (HAS_GMCH(dev_priv)) {
1865 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1866 assert_dsi_pll_enabled(dev_priv);
1868 assert_pll_enabled(dev_priv, pipe);
1870 if (new_crtc_state->has_pch_encoder) {
1871 /* if driving the PCH, we need FDI enabled */
1872 assert_fdi_rx_pll_enabled(dev_priv,
1873 intel_crtc_pch_transcoder(crtc));
1874 assert_fdi_tx_pll_enabled(dev_priv,
1875 (enum pipe) cpu_transcoder);
1877 /* FIXME: assert CPU port conditions for SNB+ */
1880 trace_intel_pipe_enable(crtc);
1882 reg = PIPECONF(cpu_transcoder);
1883 val = I915_READ(reg);
1884 if (val & PIPECONF_ENABLE) {
1885 /* we keep both pipes enabled on 830 */
1886 WARN_ON(!IS_I830(dev_priv));
1890 I915_WRITE(reg, val | PIPECONF_ENABLE);
1894 * Until the pipe starts PIPEDSL reads will return a stale value,
1895 * which causes an apparent vblank timestamp jump when PIPEDSL
1896 * resets to its proper value. That also messes up the frame count
1897 * when it's derived from the timestamps. So let's wait for the
1898 * pipe to start properly before we call drm_crtc_vblank_on()
1900 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1901 intel_wait_for_pipe_scanline_moving(crtc);
1904 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1906 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1907 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1908 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1909 enum pipe pipe = crtc->pipe;
1913 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1916 * Make sure planes won't keep trying to pump pixels to us,
1917 * or we might hang the display.
1919 assert_planes_disabled(crtc);
1921 trace_intel_pipe_disable(crtc);
1923 reg = PIPECONF(cpu_transcoder);
1924 val = I915_READ(reg);
1925 if ((val & PIPECONF_ENABLE) == 0)
1929 * Double wide has implications for planes
1930 * so best keep it disabled when not needed.
1932 if (old_crtc_state->double_wide)
1933 val &= ~PIPECONF_DOUBLE_WIDE;
1935 /* Don't disable pipe or pipe PLLs if needed */
1936 if (!IS_I830(dev_priv))
1937 val &= ~PIPECONF_ENABLE;
1939 I915_WRITE(reg, val);
1940 if ((val & PIPECONF_ENABLE) == 0)
1941 intel_wait_for_pipe_off(old_crtc_state);
/* GTT tile size in bytes: 2KiB on gen2, 4KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}
1950 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1952 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1953 unsigned int cpp = fb->format->cpp[color_plane];
1955 switch (fb->modifier) {
1956 case DRM_FORMAT_MOD_LINEAR:
1957 return intel_tile_size(dev_priv);
1958 case I915_FORMAT_MOD_X_TILED:
1959 if (IS_GEN(dev_priv, 2))
1963 case I915_FORMAT_MOD_Y_TILED_CCS:
1964 if (color_plane == 1)
1967 case I915_FORMAT_MOD_Y_TILED:
1968 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1972 case I915_FORMAT_MOD_Yf_TILED_CCS:
1973 if (color_plane == 1)
1976 case I915_FORMAT_MOD_Yf_TILED:
1992 MISSING_CASE(fb->modifier);
1998 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2000 return intel_tile_size(to_i915(fb->dev)) /
2001 intel_tile_width_bytes(fb, color_plane);
2004 /* Return the tile dimensions in pixel units */
2005 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2006 unsigned int *tile_width,
2007 unsigned int *tile_height)
2009 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2010 unsigned int cpp = fb->format->cpp[color_plane];
2012 *tile_width = tile_width_bytes / cpp;
2013 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
/* Round a framebuffer height up to a whole number of tile rows. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, color_plane);

	return ALIGN(height, tile_height);
}
2025 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2027 unsigned int size = 0;
2030 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2031 size += rot_info->plane[i].width * rot_info->plane[i].height;
2036 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2038 unsigned int size = 0;
2041 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2042 size += rem_info->plane[i].width * rem_info->plane[i].height;
2048 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2049 const struct drm_framebuffer *fb,
2050 unsigned int rotation)
2052 view->type = I915_GGTT_VIEW_NORMAL;
2053 if (drm_rotation_90_or_270(rotation)) {
2054 view->type = I915_GGTT_VIEW_ROTATED;
2055 view->rotated = to_intel_framebuffer(fb)->rot_info;
/*
 * GGTT alignment required for the cursor surface on old platforms.
 * NOTE(review): the literal return values were dropped by the
 * extraction — reconstructed from upstream; confirm against the tree.
 */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;
	else if (IS_I85X(dev_priv))
		return 256;
	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;
	else
		return 4 * 1024;
}
/*
 * GGTT alignment required for linear scanout surfaces, per platform.
 * NOTE(review): the literal return values were dropped by the
 * extraction — reconstructed from upstream; confirm against the tree.
 */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;
	else
		return 0;
}
2084 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2087 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2089 /* AUX_DIST needs only 4K alignment */
2090 if (color_plane == 1)
2093 switch (fb->modifier) {
2094 case DRM_FORMAT_MOD_LINEAR:
2095 return intel_linear_alignment(dev_priv);
2096 case I915_FORMAT_MOD_X_TILED:
2097 if (INTEL_GEN(dev_priv) >= 9)
2100 case I915_FORMAT_MOD_Y_TILED_CCS:
2101 case I915_FORMAT_MOD_Yf_TILED_CCS:
2102 case I915_FORMAT_MOD_Y_TILED:
2103 case I915_FORMAT_MOD_Yf_TILED:
2104 return 1 * 1024 * 1024;
2106 MISSING_CASE(fb->modifier);
2111 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2113 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2114 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2116 return INTEL_GEN(dev_priv) < 4 ||
2118 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2122 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2123 const struct i915_ggtt_view *view,
2125 unsigned long *out_flags)
2127 struct drm_device *dev = fb->dev;
2128 struct drm_i915_private *dev_priv = to_i915(dev);
2129 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2130 intel_wakeref_t wakeref;
2131 struct i915_vma *vma;
2132 unsigned int pinctl;
2135 if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
2136 return ERR_PTR(-EINVAL);
2138 alignment = intel_surf_alignment(fb, 0);
2140 /* Note that the w/a also requires 64 PTE of padding following the
2141 * bo. We currently fill all unused PTE with the shadow page and so
2142 * we should always have valid PTE following the scanout preventing
2145 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2146 alignment = 256 * 1024;
2149 * Global gtt pte registers are special registers which actually forward
2150 * writes to a chunk of system memory. Which means that there is no risk
2151 * that the register values disappear as soon as we call
2152 * intel_runtime_pm_put(), so it is correct to wrap only the
2153 * pin/unpin/fence and not more.
2155 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2156 i915_gem_object_lock(obj);
2158 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2162 /* Valleyview is definitely limited to scanning out the first
2163 * 512MiB. Lets presume this behaviour was inherited from the
2164 * g4x display engine and that all earlier gen are similarly
2165 * limited. Testing suggests that it is a little more
2166 * complicated than this. For example, Cherryview appears quite
2167 * happy to scanout from anywhere within its global aperture.
2169 if (HAS_GMCH(dev_priv))
2170 pinctl |= PIN_MAPPABLE;
2172 vma = i915_gem_object_pin_to_display_plane(obj,
2173 alignment, view, pinctl);
2177 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2180 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2181 * fence, whereas 965+ only requires a fence if using
2182 * framebuffer compression. For simplicity, we always, when
2183 * possible, install a fence as the cost is not that onerous.
2185 * If we fail to fence the tiled scanout, then either the
2186 * modeset will reject the change (which is highly unlikely as
2187 * the affected systems, all but one, do not have unmappable
2188 * space) or we will not be able to enable full powersaving
2189 * techniques (also likely not to apply due to various limits
2190 * FBC and the like impose on the size of the buffer, which
2191 * presumably we violated anyway with this unmappable buffer).
2192 * Anyway, it is presumably better to stumble onwards with
2193 * something and try to run the system in a "less than optimal"
2194 * mode that matches the user configuration.
2196 ret = i915_vma_pin_fence(vma);
2197 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2198 i915_gem_object_unpin_from_display_plane(vma);
2203 if (ret == 0 && vma->fence)
2204 *out_flags |= PLANE_HAS_FENCE;
2209 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2211 i915_gem_object_unlock(obj);
2212 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2216 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2218 i915_gem_object_lock(vma->obj);
2219 if (flags & PLANE_HAS_FENCE)
2220 i915_vma_unpin_fence(vma);
2221 i915_gem_object_unpin_from_display_plane(vma);
2222 i915_gem_object_unlock(vma->obj);
2227 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2228 unsigned int rotation)
2230 if (drm_rotation_90_or_270(rotation))
2231 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2233 return fb->pitches[color_plane];
2237 * Convert the x/y offsets into a linear offset.
2238 * Only valid with 0/180 degree rotation, which is fine since linear
2239 * offset is only used with linear buffers on pre-hsw and tiled buffers
2240 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2242 u32 intel_fb_xy_to_linear(int x, int y,
2243 const struct intel_plane_state *state,
2246 const struct drm_framebuffer *fb = state->hw.fb;
2247 unsigned int cpp = fb->format->cpp[color_plane];
2248 unsigned int pitch = state->color_plane[color_plane].stride;
2250 return y * pitch + x * cpp;
2254 * Add the x/y offsets derived from fb->offsets[] to the user
2255 * specified plane src x/y offsets. The resulting x/y offsets
2256 * specify the start of scanout from the beginning of the gtt mapping.
2258 void intel_add_fb_offsets(int *x, int *y,
2259 const struct intel_plane_state *state,
2263 *x += state->color_plane[color_plane].x;
2264 *y += state->color_plane[color_plane].y;
2267 static u32 intel_adjust_tile_offset(int *x, int *y,
2268 unsigned int tile_width,
2269 unsigned int tile_height,
2270 unsigned int tile_size,
2271 unsigned int pitch_tiles,
2275 unsigned int pitch_pixels = pitch_tiles * tile_width;
2278 WARN_ON(old_offset & (tile_size - 1));
2279 WARN_ON(new_offset & (tile_size - 1));
2280 WARN_ON(new_offset > old_offset);
2282 tiles = (old_offset - new_offset) / tile_size;
2284 *y += tiles / pitch_tiles * tile_height;
2285 *x += tiles % pitch_tiles * tile_width;
2287 /* minimize x in case it got needlessly big */
2288 *y += *x / pitch_pixels * tile_height;
2294 static bool is_surface_linear(u64 modifier, int color_plane)
2296 return modifier == DRM_FORMAT_MOD_LINEAR;
/*
 * Rebase an aligned surface offset from old_offset to new_offset, folding
 * the difference into the x/y offsets. Handles both tiled surfaces (via
 * intel_adjust_tile_offset()) and linear surfaces (direct byte math).
 */
2299 static u32 intel_adjust_aligned_offset(int *x, int *y,
2300 const struct drm_framebuffer *fb,
2302 unsigned int rotation,
2304 u32 old_offset, u32 new_offset)
2306 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2307 unsigned int cpp = fb->format->cpp[color_plane];
2309 WARN_ON(new_offset > old_offset);
2311 if (!is_surface_linear(fb->modifier, color_plane)) {
2312 unsigned int tile_size, tile_width, tile_height;
2313 unsigned int pitch_tiles;
2315 tile_size = intel_tile_size(dev_priv);
2316 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
/* For 90/270 rotation the pitch is in tile_height units and dims swap. */
2318 if (drm_rotation_90_or_270(rotation)) {
2319 pitch_tiles = pitch / tile_height;
2320 swap(tile_width, tile_height);
2322 pitch_tiles = pitch / (tile_width * cpp);
2325 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2326 tile_size, pitch_tiles,
2327 old_offset, new_offset);
/* Linear: convert the byte difference straight back into x/y. */
2329 old_offset += *y * pitch + *x * cpp;
2331 *y = (old_offset - new_offset) / pitch;
2332 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2339 * Adjust the tile offset by moving the difference into
/* Plane-state convenience wrapper around intel_adjust_aligned_offset(). */
2342 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2343 const struct intel_plane_state *state,
2345 u32 old_offset, u32 new_offset)
2347 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2349 state->color_plane[color_plane].stride,
2350 old_offset, new_offset);
2354 * Computes the aligned offset to the base tile and adjusts
2355 * x, y. bytes per pixel is assumed to be a power-of-two.
2357 * In the 90/270 rotated case, x and y are assumed
2358 * to be already rotated to match the rotated GTT view, and
2359 * pitch is the tile_height aligned framebuffer height.
2361 * This function is used when computing the derived information
2362 * under intel_framebuffer, so using any of that information
2363 * here is not allowed. Anything under drm_framebuffer can be
2364 * used. This is why the user has to pass in the pitch since it
2365 * is specified in the rotated orientation.
2367 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2369 const struct drm_framebuffer *fb,
2372 unsigned int rotation,
2375 unsigned int cpp = fb->format->cpp[color_plane];
2376 u32 offset, offset_aligned;
2381 if (!is_surface_linear(fb->modifier, color_plane)) {
2382 unsigned int tile_size, tile_width, tile_height;
2383 unsigned int tile_rows, tiles, pitch_tiles;
2385 tile_size = intel_tile_size(dev_priv);
2386 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2388 if (drm_rotation_90_or_270(rotation)) {
2389 pitch_tiles = pitch / tile_height;
2390 swap(tile_width, tile_height);
2392 pitch_tiles = pitch / (tile_width * cpp);
/* Whole tiles above and to the left of (x, y). */
2395 tile_rows = *y / tile_height;
2398 tiles = *x / tile_width;
2401 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
/* Round down to the requested alignment; the remainder goes into x/y. */
2402 offset_aligned = offset & ~alignment;
2404 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2405 tile_size, pitch_tiles,
2406 offset, offset_aligned);
/* Linear path: byte offset, with the sub-alignment part pushed to x/y. */
2408 offset = *y * pitch + *x * cpp;
2409 offset_aligned = offset & ~alignment;
2411 *y = (offset & alignment) / pitch;
2412 *x = ((offset & alignment) - *y * pitch) / cpp;
2415 return offset_aligned;
/*
 * Plane-state wrapper around intel_compute_aligned_offset(): picks the
 * surface alignment based on plane type (cursor vs. everything else).
 */
2418 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2419 const struct intel_plane_state *state,
2422 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2423 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2424 const struct drm_framebuffer *fb = state->hw.fb;
2425 unsigned int rotation = state->hw.rotation;
2426 int pitch = state->color_plane[color_plane].stride;
/* Cursors have their own, typically stricter, base-address alignment. */
2429 if (intel_plane->id == PLANE_CURSOR)
2430 alignment = intel_cursor_alignment(dev_priv);
2432 alignment = intel_surf_alignment(fb, color_plane);
2434 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2435 pitch, rotation, alignment);
2438 /* Convert the fb->offset[] into x/y offsets */
2439 static int intel_fb_offset_to_xy(int *x, int *y,
2440 const struct drm_framebuffer *fb,
2443 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2444 unsigned int height;
/* Tiled surfaces must start on a tile boundary. */
2446 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2447 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2448 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2449 fb->offsets[color_plane], color_plane);
2453 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2454 height = ALIGN(height, intel_tile_height(fb, color_plane));
2456 /* Catch potential overflows early */
2457 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2458 fb->offsets[color_plane])) {
2459 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2460 fb->offsets[color_plane], fb->pitches[color_plane],
/* Fold the byte offset into x/y, relative to offset 0 of the mapping. */
2468 intel_adjust_aligned_offset(x, y,
2469 fb, color_plane, DRM_MODE_ROTATE_0,
2470 fb->pitches[color_plane],
2471 fb->offsets[color_plane], 0);
/*
 * Map a framebuffer modifier to the object tiling mode used for fencing;
 * anything not X/Y tiled is treated as untiled.
 */
2476 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2478 switch (fb_modifier) {
2479 case I915_FORMAT_MOD_X_TILED:
2480 return I915_TILING_X;
2481 case I915_FORMAT_MOD_Y_TILED:
2482 case I915_FORMAT_MOD_Y_TILED_CCS:
2483 return I915_TILING_Y;
2485 return I915_TILING_NONE;
2490 * From the Sky Lake PRM:
2491 * "The Color Control Surface (CCS) contains the compression status of
2492 * the cache-line pairs. The compression state of the cache-line pair
2493 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2494 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2495 * cache-line-pairs. CCS is always Y tiled."
2497 * Since cache line pairs refers to horizontally adjacent cache lines,
2498 * each cache line in the CCS corresponds to an area of 32x16 cache
2499 * lines on the main surface. Since each pixel is 4 bytes, this gives
2500 * us a ratio of one byte in the CCS for each 8x16 pixels in the
/* Format descriptions for CCS framebuffers: plane 0 is the 4-bpp main
 * surface, plane 1 is the 1-bpp CCS with 8x16 subsampling per the above. */
2503 static const struct drm_format_info ccs_formats[] = {
2504 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2505 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2506 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2507 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2508 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2509 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2510 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2511 .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
/*
 * Linear search of a format table by fourcc; returns the matching entry
 * (fallthrough/return lines are outside this extract).
 */
2514 static const struct drm_format_info *
2515 lookup_format_info(const struct drm_format_info formats[],
2516 int num_formats, u32 format)
2520 for (i = 0; i < num_formats; i++) {
2521 if (formats[i].format == format)
/*
 * Driver hook supplying per-modifier format info: CCS modifiers use the
 * two-plane ccs_formats[] table instead of the core single-plane info.
 */
2528 static const struct drm_format_info *
2529 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2531 switch (cmd->modifier[0]) {
2532 case I915_FORMAT_MOD_Y_TILED_CCS:
2533 case I915_FORMAT_MOD_Yf_TILED_CCS:
2534 return lookup_format_info(ccs_formats,
2535 ARRAY_SIZE(ccs_formats),
/* True for the render-compression (CCS) framebuffer modifiers. */
2542 bool is_ccs_modifier(u64 modifier)
2544 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2545 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
/*
 * Max framebuffer stride supported by any plane, queried from pipe A's
 * primary plane which is assumed to have the most permissive limits.
 */
2548 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2549 u32 pixel_format, u64 modifier)
2551 struct intel_crtc *crtc;
2552 struct intel_plane *plane;
2555 * We assume the primary plane for pipe A has
2556 * the highest stride limits of them all.
2558 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2562 plane = to_intel_plane(crtc->base.primary);
2564 return plane->max_stride(plane, pixel_format, modifier,
/*
 * Max stride accepted for framebuffer creation. Non-CCS fbs can be
 * remapped through the GTT, so a generous render-engine-matching limit
 * is used on gen4+; CCS cannot be remapped and falls back to the plane
 * hardware limit.
 */
2569 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2570 u32 pixel_format, u64 modifier)
2573 * Arbitrary limit for gen4+ chosen to match the
2574 * render engine max stride.
2576 * The new CCS hash mode makes remapping impossible
2578 if (!is_ccs_modifier(modifier)) {
2579 if (INTEL_GEN(dev_priv) >= 7)
2581 else if (INTEL_GEN(dev_priv) >= 4)
2585 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
/*
 * Required stride alignment for a color plane. Linear fbs whose pitch
 * exceeds the plane hardware limit must be page (tile-size) aligned so
 * they remain remappable; otherwise the tile row width applies.
 */
2589 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2591 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2593 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2594 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2599 * To make remapping with linear generally feasible
2600 * we need the stride to be page aligned.
2602 if (fb->pitches[color_plane] > max_stride)
2603 return intel_tile_size(dev_priv);
2607 return intel_tile_width_bytes(fb, color_plane);
/*
 * Whether this plane/fb combination may use GTT remapping to work around
 * stride limits. Rules out cursors, gen2/3, CCS, and linear fbs with a
 * non-page-aligned stride.
 */
2611 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2613 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2614 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2615 const struct drm_framebuffer *fb = plane_state->hw.fb;
2618 /* We don't want to deal with remapping with cursors */
2619 if (plane->id == PLANE_CURSOR)
2623 * The display engine limits already match/exceed the
2624 * render engine limits, so not much point in remapping.
2625 * Would also need to deal with the fence POT alignment
2626 * and gen2 2KiB GTT tile size.
2628 if (INTEL_GEN(dev_priv) < 4)
2632 * The new CCS hash mode isn't compatible with remapping as
2633 * the virtual address of the pages affects the compressed data.
2635 if (is_ccs_modifier(fb->modifier))
2638 /* Linear needs a page aligned stride for remapping */
2639 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2640 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2642 for (i = 0; i < fb->format->num_planes; i++) {
2643 if (fb->pitches[i] & alignment)
/*
 * Whether remapping is actually required: the plane is visible, remapping
 * is possible, and the fb stride exceeds the plane hardware limit.
 */
2651 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2653 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2654 const struct drm_framebuffer *fb = plane_state->hw.fb;
2655 unsigned int rotation = plane_state->hw.rotation;
2656 u32 stride, max_stride;
2659 * No remapping for invisible planes since we don't have
2660 * an actual source viewport to remap.
2662 if (!plane_state->uapi.visible)
2665 if (!intel_plane_can_remap(plane_state))
2669 * FIXME: aux plane limits on gen9+ are
2670 * unclear in Bspec, for now no checking.
2672 stride = intel_fb_pitch(fb, 0, rotation);
2673 max_stride = plane->max_stride(plane, fb->format->format,
2674 fb->modifier, rotation);
2676 return stride > max_stride;
/*
 * Precompute per-color-plane layout info for a framebuffer: normal-view
 * x/y offsets, rotated-view geometry (rot_info / rotated[]), and validate
 * CCS alignment and total bo size. Runs once at fb creation.
 * NOTE(review): several short lines (braces, else-branches) are missing
 * from this extract; the control flow shown here is partial.
 */
2680 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2681 struct drm_framebuffer *fb)
2683 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2684 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2685 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2686 u32 gtt_offset_rotated = 0;
2687 unsigned int max_size = 0;
2688 int i, num_planes = fb->format->num_planes;
2689 unsigned int tile_size = intel_tile_size(dev_priv);
2691 for (i = 0; i < num_planes; i++) {
2692 unsigned int width, height;
2693 unsigned int cpp, size;
2698 cpp = fb->format->cpp[i];
2699 width = drm_framebuffer_plane_width(fb->width, fb, i);
2700 height = drm_framebuffer_plane_height(fb->height, fb, i);
/* Derive the plane's x/y start from fb->offsets[i]. */
2702 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2704 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
/* Plane 1 of a CCS fb is the compression surface; its intra-tile x/y
 * must line up with the main surface since CCS has no own offsets. */
2709 if (is_ccs_modifier(fb->modifier) && i == 1) {
2710 int hsub = fb->format->hsub;
2711 int vsub = fb->format->vsub;
2712 int tile_width, tile_height;
2716 intel_tile_dims(fb, i, &tile_width, &tile_height);
2718 tile_height *= vsub;
2720 ccs_x = (x * hsub) % tile_width;
2721 ccs_y = (y * vsub) % tile_height;
2722 main_x = intel_fb->normal[0].x % tile_width;
2723 main_y = intel_fb->normal[0].y % tile_height;
2726 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2727 * x/y offsets must match between CCS and the main surface.
2729 if (main_x != ccs_x || main_y != ccs_y) {
2730 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2733 intel_fb->normal[0].x,
2734 intel_fb->normal[0].y,
2741 * The fence (if used) is aligned to the start of the object
2742 * so having the framebuffer wrap around across the edge of the
2743 * fenced region doesn't really work. We have no API to configure
2744 * the fence start offset within the object (nor could we probably
2745 * on gen2/3). So it's just easier if we just require that the
2746 * fb layout agrees with the fence layout. We already check that the
2747 * fb stride matches the fence stride elsewhere.
2749 if (i == 0 && i915_gem_object_is_tiled(obj) &&
2750 (x + width) * cpp > fb->pitches[i]) {
2751 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2757 * First pixel of the framebuffer from
2758 * the start of the normal gtt mapping.
2760 intel_fb->normal[i].x = x;
2761 intel_fb->normal[i].y = y;
2763 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
/* Offsets below are tracked in whole tiles, not bytes. */
2767 offset /= tile_size;
2769 if (!is_surface_linear(fb->modifier, i)) {
2770 unsigned int tile_width, tile_height;
2771 unsigned int pitch_tiles;
2774 intel_tile_dims(fb, i, &tile_width, &tile_height);
2776 rot_info->plane[i].offset = offset;
2777 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2778 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2779 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2781 intel_fb->rotated[i].pitch =
2782 rot_info->plane[i].height * tile_height;
2784 /* how many tiles does this plane need */
2785 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2787 * If the plane isn't horizontally tile aligned,
2788 * we need one more tile.
2793 /* rotate the x/y offsets to match the GTT view */
2794 drm_rect_init(&r, x, y, width, height);
2796 rot_info->plane[i].width * tile_width,
2797 rot_info->plane[i].height * tile_height,
2798 DRM_MODE_ROTATE_270);
2802 /* rotate the tile dimensions to match the GTT view */
2803 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2804 swap(tile_width, tile_height);
2807 * We only keep the x/y offsets, so push all of the
2808 * gtt offset into the x/y offsets.
2810 intel_adjust_tile_offset(&x, &y,
2811 tile_width, tile_height,
2812 tile_size, pitch_tiles,
2813 gtt_offset_rotated * tile_size, 0);
2815 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2818 * First pixel of the framebuffer from
2819 * the start of the rotated gtt mapping.
2821 intel_fb->rotated[i].x = x;
2822 intel_fb->rotated[i].y = y;
/* Linear plane: size in tiles from the byte extent. */
2824 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2825 x * cpp, tile_size);
2828 /* how many tiles in total needed in the bo */
2829 max_size = max(max_size, offset + size);
/* Reject fbs whose tile footprint exceeds the backing object. */
2832 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2833 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2834 mul_u32_u32(max_size, tile_size), obj->base.size);
/*
 * Build a remapped/rotated GGTT view for this plane covering only the
 * src viewport, and fill plane_state->color_plane[] (stride, x, y) to
 * match the remapped layout. CCS fbs never reach here (WARN below).
 * NOTE(review): some brace/else lines are missing from this extract.
 */
2842 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2844 struct drm_i915_private *dev_priv =
2845 to_i915(plane_state->uapi.plane->dev);
2846 struct drm_framebuffer *fb = plane_state->hw.fb;
2847 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2848 struct intel_rotation_info *info = &plane_state->view.rotated;
2849 unsigned int rotation = plane_state->hw.rotation;
2850 int i, num_planes = fb->format->num_planes;
2851 unsigned int tile_size = intel_tile_size(dev_priv);
2852 unsigned int src_x, src_y;
2853 unsigned int src_w, src_h;
2856 memset(&plane_state->view, 0, sizeof(plane_state->view));
2857 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2858 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
/* src coordinates are 16.16 fixed point. */
2860 src_x = plane_state->uapi.src.x1 >> 16;
2861 src_y = plane_state->uapi.src.y1 >> 16;
2862 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
2863 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
2865 WARN_ON(is_ccs_modifier(fb->modifier));
2867 /* Make src coordinates relative to the viewport */
2868 drm_rect_translate(&plane_state->uapi.src,
2869 -(src_x << 16), -(src_y << 16));
2871 /* Rotate src coordinates to match rotated GTT view */
2872 if (drm_rotation_90_or_270(rotation))
2873 drm_rect_rotate(&plane_state->uapi.src,
2874 src_w << 16, src_h << 16,
2875 DRM_MODE_ROTATE_270);
2877 for (i = 0; i < num_planes; i++) {
/* Planes beyond 0 are chroma-subsampled per the format. */
2878 unsigned int hsub = i ? fb->format->hsub : 1;
2879 unsigned int vsub = i ? fb->format->vsub : 1;
2880 unsigned int cpp = fb->format->cpp[i];
2881 unsigned int tile_width, tile_height;
2882 unsigned int width, height;
2883 unsigned int pitch_tiles;
2887 intel_tile_dims(fb, i, &tile_width, &tile_height);
2891 width = src_w / hsub;
2892 height = src_h / vsub;
2895 * First pixel of the src viewport from the
2896 * start of the normal gtt mapping.
2898 x += intel_fb->normal[i].x;
2899 y += intel_fb->normal[i].y;
2901 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2902 fb, i, fb->pitches[i],
2903 DRM_MODE_ROTATE_0, tile_size);
2904 offset /= tile_size;
2906 info->plane[i].offset = offset;
2907 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2909 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2910 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2912 if (drm_rotation_90_or_270(rotation)) {
2915 /* rotate the x/y offsets to match the GTT view */
2916 drm_rect_init(&r, x, y, width, height);
2918 info->plane[i].width * tile_width,
2919 info->plane[i].height * tile_height,
2920 DRM_MODE_ROTATE_270);
2924 pitch_tiles = info->plane[i].height;
2925 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2927 /* rotate the tile dimensions to match the GTT view */
2928 swap(tile_width, tile_height);
2930 pitch_tiles = info->plane[i].width;
2931 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2935 * We only keep the x/y offsets, so push all of the
2936 * gtt offset into the x/y offsets.
2938 intel_adjust_tile_offset(&x, &y,
2939 tile_width, tile_height,
2940 tile_size, pitch_tiles,
2941 gtt_offset * tile_size, 0);
2943 gtt_offset += info->plane[i].width * info->plane[i].height;
/* Remapped view starts at offset 0; everything lives in x/y. */
2945 plane_state->color_plane[i].offset = 0;
2946 plane_state->color_plane[i].x = x;
2947 plane_state->color_plane[i].y = y;
/*
 * Decide between the precomputed normal/rotated fb layout and a remapped
 * view, fill plane_state->color_plane[] accordingly, and validate the
 * resulting stride.
 */
2952 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2954 const struct intel_framebuffer *fb =
2955 to_intel_framebuffer(plane_state->hw.fb);
2956 unsigned int rotation = plane_state->hw.rotation;
2962 num_planes = fb->base.format->num_planes;
2964 if (intel_plane_needs_remap(plane_state)) {
2965 intel_plane_remap_gtt(plane_state);
2968 * Sometimes even remapping can't overcome
2969 * the stride limitations :( Can happen with
2970 * big plane sizes and suitably misaligned
2973 return intel_plane_check_stride(plane_state);
/* Non-remapped path: use the layouts precomputed at fb creation. */
2976 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2978 for (i = 0; i < num_planes; i++) {
2979 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2980 plane_state->color_plane[i].offset = 0;
2982 if (drm_rotation_90_or_270(rotation)) {
2983 plane_state->color_plane[i].x = fb->rotated[i].x;
2984 plane_state->color_plane[i].y = fb->rotated[i].y;
2986 plane_state->color_plane[i].x = fb->normal[i].x;
2987 plane_state->color_plane[i].y = fb->normal[i].y;
2991 /* Rotate src coordinates to match rotated GTT view */
2992 if (drm_rotation_90_or_270(rotation))
2993 drm_rect_rotate(&plane_state->uapi.src,
2994 fb->base.width << 16, fb->base.height << 16,
2995 DRM_MODE_ROTATE_270);
2997 return intel_plane_check_stride(plane_state);
/*
 * Translate a pre-SKL DISPPLANE_* hardware pixel format field into the
 * corresponding DRM fourcc, for readout of BIOS-programmed planes.
 */
3000 static int i9xx_format_to_fourcc(int format)
3003 case DISPPLANE_8BPP:
3004 return DRM_FORMAT_C8;
3005 case DISPPLANE_BGRA555:
3006 return DRM_FORMAT_ARGB1555;
3007 case DISPPLANE_BGRX555:
3008 return DRM_FORMAT_XRGB1555;
3009 case DISPPLANE_BGRX565:
3010 return DRM_FORMAT_RGB565;
3012 case DISPPLANE_BGRX888:
3013 return DRM_FORMAT_XRGB8888;
3014 case DISPPLANE_RGBX888:
3015 return DRM_FORMAT_XBGR8888;
3016 case DISPPLANE_BGRA888:
3017 return DRM_FORMAT_ARGB8888;
3018 case DISPPLANE_RGBA888:
3019 return DRM_FORMAT_ABGR8888;
3020 case DISPPLANE_BGRX101010:
3021 return DRM_FORMAT_XRGB2101010;
3022 case DISPPLANE_RGBX101010:
3023 return DRM_FORMAT_XBGR2101010;
3024 case DISPPLANE_BGRA101010:
3025 return DRM_FORMAT_ARGB2101010;
3026 case DISPPLANE_RGBA101010:
3027 return DRM_FORMAT_ABGR2101010;
3028 case DISPPLANE_RGBX161616:
3029 return DRM_FORMAT_XBGR16161616F;
/*
 * Translate a SKL+ PLANE_CTL format field into a DRM fourcc. rgb_order
 * and alpha disambiguate the RGB formats, which share one hardware value
 * per bit depth (the nested if/else lines are partly outside this extract).
 */
3033 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3036 case PLANE_CTL_FORMAT_RGB_565:
3037 return DRM_FORMAT_RGB565;
3038 case PLANE_CTL_FORMAT_NV12:
3039 return DRM_FORMAT_NV12;
3040 case PLANE_CTL_FORMAT_P010:
3041 return DRM_FORMAT_P010;
3042 case PLANE_CTL_FORMAT_P012:
3043 return DRM_FORMAT_P012;
3044 case PLANE_CTL_FORMAT_P016:
3045 return DRM_FORMAT_P016;
3046 case PLANE_CTL_FORMAT_Y210:
3047 return DRM_FORMAT_Y210;
3048 case PLANE_CTL_FORMAT_Y212:
3049 return DRM_FORMAT_Y212;
3050 case PLANE_CTL_FORMAT_Y216:
3051 return DRM_FORMAT_Y216;
3052 case PLANE_CTL_FORMAT_Y410:
3053 return DRM_FORMAT_XVYU2101010;
3054 case PLANE_CTL_FORMAT_Y412:
3055 return DRM_FORMAT_XVYU12_16161616;
3056 case PLANE_CTL_FORMAT_Y416:
3057 return DRM_FORMAT_XVYU16161616;
3059 case PLANE_CTL_FORMAT_XRGB_8888:
3062 return DRM_FORMAT_ABGR8888;
3064 return DRM_FORMAT_XBGR8888;
3067 return DRM_FORMAT_ARGB8888;
3069 return DRM_FORMAT_XRGB8888;
3071 case PLANE_CTL_FORMAT_XRGB_2101010:
3074 return DRM_FORMAT_ABGR2101010;
3076 return DRM_FORMAT_XBGR2101010;
3079 return DRM_FORMAT_ARGB2101010;
3081 return DRM_FORMAT_XRGB2101010;
3083 case PLANE_CTL_FORMAT_XRGB_16161616F:
3086 return DRM_FORMAT_ABGR16161616F;
3088 return DRM_FORMAT_XBGR16161616F;
3091 return DRM_FORMAT_ARGB16161616F;
3093 return DRM_FORMAT_XRGB16161616F;
/*
 * Wrap the BIOS-programmed scanout memory (in stolen) in a GEM object
 * and initialize the initial-plane framebuffer around it. Bails out if
 * the fb is too big, the modifier is unsupported, or the stolen
 * preallocation cannot be claimed.
 */
3099 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3100 struct intel_initial_plane_config *plane_config)
3102 struct drm_device *dev = crtc->base.dev;
3103 struct drm_i915_private *dev_priv = to_i915(dev);
3104 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3105 struct drm_framebuffer *fb = &plane_config->fb->base;
3106 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3107 u32 size_aligned = round_up(plane_config->base + plane_config->size,
3109 struct drm_i915_gem_object *obj;
3112 size_aligned -= base_aligned;
3114 if (plane_config->size == 0)
3117 /* If the FB is too big, just don't use it since fbdev is not very
3118 * important and we should probably use that space with FBC or other
3120 if (size_aligned * 2 > dev_priv->stolen_usable_size)
3123 switch (fb->modifier) {
3124 case DRM_FORMAT_MOD_LINEAR:
3125 case I915_FORMAT_MOD_X_TILED:
3126 case I915_FORMAT_MOD_Y_TILED:
3129 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3134 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
/* Mirror the BIOS tiling onto the object so fencing matches. */
3141 switch (plane_config->tiling) {
3142 case I915_TILING_NONE:
3146 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3149 MISSING_CASE(plane_config->tiling);
3153 mode_cmd.pixel_format = fb->format->format;
3154 mode_cmd.width = fb->width;
3155 mode_cmd.height = fb->height;
3156 mode_cmd.pitches[0] = fb->pitches[0];
3157 mode_cmd.modifier[0] = fb->modifier;
3158 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3160 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
3161 DRM_DEBUG_KMS("intel fb init failed\n");
3166 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
3169 i915_gem_object_put(obj);
/*
 * Update the plane's visibility and keep the crtc state's plane_mask in
 * sync (mask bit set iff visible).
 */
3174 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3175 struct intel_plane_state *plane_state,
3178 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3180 plane_state->uapi.visible = visible;
3183 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3185 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
/* Rebuild crtc_state->active_planes from the (unique-id) plane_mask. */
3188 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3190 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3191 struct drm_plane *plane;
3194 * Active_planes aliases if multiple "primary" or cursor planes
3195 * have been used on the same (or wrong) pipe. plane_mask uses
3196 * unique ids, hence we can use that to reconstruct active_planes.
3198 crtc_state->active_planes = 0;
3200 drm_for_each_plane_mask(plane, &dev_priv->drm,
3201 crtc_state->uapi.plane_mask)
3202 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
/*
 * Disable a plane outside the atomic machinery (boot-time fixup path):
 * clears visibility/bookkeeping in the current state and shuts the
 * hardware plane off immediately.
 */
3205 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3206 struct intel_plane *plane)
3208 struct intel_crtc_state *crtc_state =
3209 to_intel_crtc_state(crtc->base.state);
3210 struct intel_plane_state *plane_state =
3211 to_intel_plane_state(plane->base.state);
3213 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3214 plane->base.base.id, plane->base.name,
3215 crtc->base.base.id, crtc->base.name);
3217 intel_set_plane_visible(crtc_state, plane_state, false);
3218 fixup_active_planes(crtc_state);
3219 crtc_state->data_rate[plane->id] = 0;
3220 crtc_state->min_cdclk[plane->id] = 0;
/* Primary plane disable needs extra work (e.g. FBC/PSR teardown). */
3222 if (plane->id == PLANE_PRIMARY)
3223 intel_pre_disable_primary_noatomic(&crtc->base);
3225 intel_disable_plane(plane, crtc_state);
/* NULL-safe accessor for the frontbuffer tracking object of an fb. */
3228 static struct intel_frontbuffer *
3229 to_intel_frontbuffer(struct drm_framebuffer *fb)
3231 return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
/*
 * Take over the BIOS framebuffer for the primary plane: allocate an
 * object over the preallocated stolen memory, or share an fb already
 * reconstructed for another CRTC at the same GTT base. On total failure
 * the plane is disabled so state stays consistent.
 * NOTE(review): goto labels and some braces are missing from this
 * extract; flow shown is partial.
 */
3235 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3236 struct intel_initial_plane_config *plane_config)
3238 struct drm_device *dev = intel_crtc->base.dev;
3239 struct drm_i915_private *dev_priv = to_i915(dev);
3241 struct drm_plane *primary = intel_crtc->base.primary;
3242 struct drm_plane_state *plane_state = primary->state;
3243 struct intel_plane *intel_plane = to_intel_plane(primary);
3244 struct intel_plane_state *intel_state =
3245 to_intel_plane_state(plane_state);
3246 struct drm_framebuffer *fb;
3248 if (!plane_config->fb)
3251 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
3252 fb = &plane_config->fb->base;
3256 kfree(plane_config->fb);
3259 * Failed to alloc the obj, check to see if we should share
3260 * an fb with another CRTC instead
3262 for_each_crtc(dev, c) {
3263 struct intel_plane_state *state;
3265 if (c == &intel_crtc->base)
3268 if (!to_intel_crtc(c)->active)
3271 state = to_intel_plane_state(c->primary->state);
/* Same GTT base => the BIOS cloned one fb across pipes; share it. */
3275 if (intel_plane_ggtt_offset(state) == plane_config->base) {
3277 drm_framebuffer_get(fb);
3283 * We've failed to reconstruct the BIOS FB. Current display state
3284 * indicates that the primary plane is visible, but has a NULL FB,
3285 * which will lead to problems later if we don't fix it up. The
3286 * simplest solution is to just disable the primary plane now and
3287 * pretend the BIOS never had it enabled.
3289 intel_plane_disable_noatomic(intel_crtc, intel_plane);
3294 intel_state->hw.rotation = plane_config->rotation;
3295 intel_fill_fb_ggtt_view(&intel_state->view, fb,
3296 intel_state->hw.rotation);
3297 intel_state->color_plane[0].stride =
3298 intel_fb_pitch(fb, 0, intel_state->hw.rotation);
3301 intel_pin_and_fence_fb_obj(fb,
3303 intel_plane_uses_fence(intel_state),
3304 &intel_state->flags);
3305 if (IS_ERR(intel_state->vma)) {
3306 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
3307 intel_crtc->pipe, PTR_ERR(intel_state->vma));
3309 intel_state->vma = NULL;
3310 drm_framebuffer_put(fb);
3314 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
/* Full-fb src/dst rectangles (src in 16.16 fixed point). */
3316 plane_state->src_x = 0;
3317 plane_state->src_y = 0;
3318 plane_state->src_w = fb->width << 16;
3319 plane_state->src_h = fb->height << 16;
3321 plane_state->crtc_x = 0;
3322 plane_state->crtc_y = 0;
3323 plane_state->crtc_w = fb->width;
3324 plane_state->crtc_h = fb->height;
3326 intel_state->uapi.src = drm_plane_state_src(plane_state);
3327 intel_state->uapi.dst = drm_plane_state_dest(plane_state);
3329 if (plane_config->tiling)
3330 dev_priv->preserve_bios_swizzle = true;
3332 plane_state->fb = fb;
3333 plane_state->crtc = &intel_crtc->base;
3334 intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
3336 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3337 &to_intel_frontbuffer(fb)->bits);
/*
 * SKL/KBL maximum source width for a plane, by modifier (return value
 * lines are outside this extract).
 */
3340 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3342 unsigned int rotation)
3344 int cpp = fb->format->cpp[color_plane];
3346 switch (fb->modifier) {
3347 case DRM_FORMAT_MOD_LINEAR:
3348 case I915_FORMAT_MOD_X_TILED:
3350 * Validated limit is 4k, but has 5k should
3351 * work apart from the following features:
3352 * - Ytile (already limited to 4k)
3353 * - FP16 (already limited to 4k)
3354 * - render compression (already limited to 4k)
3355 * - KVMR sprite and cursor (don't care)
3356 * - horizontal panning (TODO verify this)
3357 * - pipe and plane scaling (TODO verify this)
3363 case I915_FORMAT_MOD_Y_TILED_CCS:
3364 case I915_FORMAT_MOD_Yf_TILED_CCS:
3365 /* FIXME AUX plane? */
3366 case I915_FORMAT_MOD_Y_TILED:
3367 case I915_FORMAT_MOD_Yf_TILED:
3373 MISSING_CASE(fb->modifier);
/*
 * GLK/CNL maximum source width for a plane, by modifier (return value
 * lines are outside this extract).
 */
3378 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3380 unsigned int rotation)
3382 int cpp = fb->format->cpp[color_plane];
3384 switch (fb->modifier) {
3385 case DRM_FORMAT_MOD_LINEAR:
3386 case I915_FORMAT_MOD_X_TILED:
3391 case I915_FORMAT_MOD_Y_TILED_CCS:
3392 case I915_FORMAT_MOD_Yf_TILED_CCS:
3393 /* FIXME AUX plane? */
3394 case I915_FORMAT_MOD_Y_TILED:
3395 case I915_FORMAT_MOD_Yf_TILED:
3401 MISSING_CASE(fb->modifier);
/* ICL+ maximum source width (body outside this extract). */
3406 static int icl_max_plane_width(const struct drm_framebuffer *fb,
3408 unsigned int rotation)
/* Pre-ICL maximum source height (body outside this extract). */
3413 static int skl_max_plane_height(void)
/* ICL+ maximum source height (body outside this extract). */
3418 static int icl_max_plane_height(void)
/*
 * Walk the CCS AUX surface offset backwards (alignment step at a time)
 * until its x/y match the main surface's; returns false if no match is
 * reachable. On success the aux plane's offset/x/y are updated.
 */
3423 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3424 int main_x, int main_y, u32 main_offset)
3426 const struct drm_framebuffer *fb = plane_state->hw.fb;
3427 int hsub = fb->format->hsub;
3428 int vsub = fb->format->vsub;
3429 int aux_x = plane_state->color_plane[1].x;
3430 int aux_y = plane_state->color_plane[1].y;
3431 u32 aux_offset = plane_state->color_plane[1].offset;
3432 u32 alignment = intel_surf_alignment(fb, 1);
3434 while (aux_offset >= main_offset && aux_y <= main_y) {
3437 if (aux_x == main_x && aux_y == main_y)
/* Can't step back past offset 0. */
3440 if (aux_offset == 0)
3445 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3446 aux_offset, aux_offset - alignment);
/* Map the tile-level x/y back into subsampled AUX coordinates. */
3447 aux_x = x * hsub + aux_x % hsub;
3448 aux_y = y * vsub + aux_y % vsub;
3451 if (aux_x != main_x || aux_y != main_y)
3454 plane_state->color_plane[1].offset = aux_offset;
3455 plane_state->color_plane[1].x = aux_x;
3456 plane_state->color_plane[1].y = aux_y;
/*
 * Validate and finalize the main (plane 0) surface: enforce per-gen
 * size limits, compute the aligned surface offset, keep it below the
 * AUX offset for CCS, and work around the X-tile x+width-vs-stride
 * hardware restriction by stepping the offset back one alignment unit
 * at a time.
 * NOTE(review): several short lines are missing from this extract.
 */
3461 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3463 struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
3464 const struct drm_framebuffer *fb = plane_state->hw.fb;
3465 unsigned int rotation = plane_state->hw.rotation;
3466 int x = plane_state->uapi.src.x1 >> 16;
3467 int y = plane_state->uapi.src.y1 >> 16;
3468 int w = drm_rect_width(&plane_state->uapi.src) >> 16;
3469 int h = drm_rect_height(&plane_state->uapi.src) >> 16;
3472 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3474 if (INTEL_GEN(dev_priv) >= 11)
3475 max_width = icl_max_plane_width(fb, 0, rotation);
3476 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3477 max_width = glk_max_plane_width(fb, 0, rotation);
3479 max_width = skl_max_plane_width(fb, 0, rotation);
3481 if (INTEL_GEN(dev_priv) >= 11)
3482 max_height = icl_max_plane_height();
3484 max_height = skl_max_plane_height();
3486 if (w > max_width || h > max_height) {
3487 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3488 w, h, max_width, max_height);
3492 intel_add_fb_offsets(&x, &y, plane_state, 0);
3493 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3494 alignment = intel_surf_alignment(fb, 0);
3497 * AUX surface offset is specified as the distance from the
3498 * main surface offset, and it must be non-negative. Make
3499 * sure that is what we will get.
3501 if (offset > aux_offset)
3502 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3503 offset, aux_offset & ~(alignment - 1));
3506 * When using an X-tiled surface, the plane blows up
3507 * if the x offset + width exceed the stride.
3509 * TODO: linear and Y-tiled seem fine, Yf untested,
3511 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3512 int cpp = fb->format->cpp[0];
3514 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3516 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
/* Step the offset back to push more of it into the x offset. */
3520 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3521 offset, offset - alignment);
3526 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3527 * they match with the main surface x/y offsets.
3529 if (is_ccs_modifier(fb->modifier)) {
3530 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3534 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3535 offset, offset - alignment);
3538 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3539 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3544 plane_state->color_plane[0].offset = offset;
3545 plane_state->color_plane[0].x = x;
3546 plane_state->color_plane[0].y = y;
3549 * Put the final coordinates back so that the src
3550 * coordinate checks will see the right values.
3552 drm_rect_translate_to(&plane_state->uapi.src,
/*
 * Compute/validate the CbCr (UV) aux surface for a semiplanar fb (NV12 etc.).
 * src coordinates are 16.16 fixed point; the >> 17 folds in the 2x chroma
 * subsampling, so x/y/w/h below are in chroma-plane pixel units.
 * Results are stored in plane_state->color_plane[1].
 */
3558 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3560 const struct drm_framebuffer *fb = plane_state->hw.fb;
3561 unsigned int rotation = plane_state->hw.rotation;
3562 int max_width = skl_max_plane_width(fb, 1, rotation);
/* NOTE(review): 4096 looks like the hw max plane height here — confirm vs bspec */
3563 int max_height = 4096;
3564 int x = plane_state->uapi.src.x1 >> 17;
3565 int y = plane_state->uapi.src.y1 >> 17;
3566 int w = drm_rect_width(&plane_state->uapi.src) >> 17;
3567 int h = drm_rect_height(&plane_state->uapi.src) >> 17;
/* Fold the fb plane-1 offsets in, then align the resulting surface offset. */
3570 intel_add_fb_offsets(&x, &y, plane_state, 1);
3571 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3573 /* FIXME not quite sure how/if these apply to the chroma plane */
3574 if (w > max_width || h > max_height) {
3575 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3576 w, h, max_width, max_height);
3580 plane_state->color_plane[1].offset = offset;
3581 plane_state->color_plane[1].x = x;
3582 plane_state->color_plane[1].y = y;
/*
 * Compute the CCS (color-compression) aux surface offset/coords.
 * The CCS plane is subsampled by hsub x vsub relative to the main plane;
 * the sub-tile remainder (src_x % hsub, src_y % vsub) is added back after
 * alignment so the stored x/y stay in main-surface pixel units.
 */
3587 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3589 const struct drm_framebuffer *fb = plane_state->hw.fb;
3590 int src_x = plane_state->uapi.src.x1 >> 16;
3591 int src_y = plane_state->uapi.src.y1 >> 16;
3592 int hsub = fb->format->hsub;
3593 int vsub = fb->format->vsub;
3594 int x = src_x / hsub;
3595 int y = src_y / vsub;
3598 intel_add_fb_offsets(&x, &y, plane_state, 1);
3599 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3601 plane_state->color_plane[1].offset = offset;
3602 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3603 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
/*
 * Top-level skl+ plane surface check: computes GTT mapping, then the aux
 * surface (NV12 chroma or CCS) and finally the main surface. The aux
 * surface must be done first because the main-surface setup reads
 * color_plane[1].offset.
 */
3608 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3610 const struct drm_framebuffer *fb = plane_state->hw.fb;
3613 ret = intel_plane_compute_gtt(plane_state);
/* Nothing more to program for an invisible plane. */
3617 if (!plane_state->uapi.visible)
3621 * Handle the AUX surface first since
3622 * the main surface setup depends on it.
3624 if (drm_format_info_is_yuv_semiplanar(fb->format)) {
3625 ret = skl_check_nv12_aux_surface(plane_state);
3628 } else if (is_ccs_modifier(fb->modifier)) {
3629 ret = skl_check_ccs_aux_surface(plane_state);
/*
 * No aux surface: ~0xfff looks like a sentinel offset so the
 * "aux_offset >= main offset" logic in skl_check_main_surface
 * never constrains the main plane — TODO confirm intent.
 */
3633 plane_state->color_plane[1].offset = ~0xfff;
3634 plane_state->color_plane[1].x = 0;
3635 plane_state->color_plane[1].y = 0;
3638 ret = skl_check_main_surface(plane_state);
/*
 * Return the cdclk ratio (num/den) required by this primary plane's
 * pixel format; per the comment below, the 64bpp case is limited to
 * 80% of cdclk (ilk/snb rule applied everywhere for simplicity).
 * Ratio assignment lines are not visible in this view.
 */
3645 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
3646 const struct intel_plane_state *plane_state,
3647 unsigned int *num, unsigned int *den)
3649 const struct drm_framebuffer *fb = plane_state->hw.fb;
3650 unsigned int cpp = fb->format->cpp[0];
3653 * g4x bspec says 64bpp pixel rate can't exceed 80%
3654 * of cdclk when the sprite plane is enabled on the
3655 * same pipe. ilk/snb bspec says 64bpp pixel rate is
3656 * never allowed to exceed 80% of cdclk. Let's just go
3657 * with the ilk/snb limit always.
/*
 * Minimum cdclk (in the same units as crtc_state->pixel_rate) needed to
 * scan out this plane: pixel_rate scaled by the plane's num/den ratio,
 * halved (presumably) for double-wide pipes which fetch two pixels/clock.
 */
3668 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
3669 const struct intel_plane_state *plane_state)
3671 unsigned int pixel_rate;
3672 unsigned int num, den;
3675 * Note that crtc_state->pixel_rate accounts for both
3676 * horizontal and vertical panel fitter downscaling factors.
3677 * Pre-HSW bspec tells us to only consider the horizontal
3678 * downscaling factor here. We ignore that and just consider
3679 * both for simplicity.
3681 pixel_rate = crtc_state->pixel_rate;
3683 i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
3685 /* two pixels per clock with double wide pipe */
3686 if (crtc_state->double_wide)
/* Round up so the returned cdclk is never too low for the plane. */
3689 return DIV_ROUND_UP(pixel_rate * num, den);
/*
 * Max fb stride (bytes) for a pre-skl primary plane, keyed on platform
 * generation and X-tiling. The actual return values are elided from this
 * view; only the platform/tiling branch structure is visible.
 */
3693 i9xx_plane_max_stride(struct intel_plane *plane,
3694 u32 pixel_format, u64 modifier,
3695 unsigned int rotation)
3697 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3699 if (!HAS_GMCH(dev_priv)) {
3701 } else if (INTEL_GEN(dev_priv) >= 4) {
3702 if (modifier == I915_FORMAT_MOD_X_TILED)
3706 } else if (INTEL_GEN(dev_priv) >= 3) {
3707 if (modifier == I915_FORMAT_MOD_X_TILED)
/* gen2: plane C has a different limit than planes A/B. */
3712 if (plane->i9xx_plane == PLANE_C)
/*
 * Crtc-dependent bits of the DSPCNTR register: pipe gamma/csc enables
 * and, pre-ilk, which pipe the plane is assigned to.
 */
3719 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3721 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3722 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3725 if (crtc_state->gamma_enable)
3726 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3728 if (crtc_state->csc_enable)
3729 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
/* Pre-ilk planes can move between pipes; select the pipe explicitly. */
3731 if (INTEL_GEN(dev_priv) < 5)
3732 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
/*
 * Plane-state-dependent bits of DSPCNTR: enable, trickle-feed disable on
 * affected platforms, pixel format, X-tiling, and 180/mirror rotation.
 * (Crtc-dependent bits come from i9xx_plane_ctl_crtc().)
 */
3737 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3738 const struct intel_plane_state *plane_state)
3740 struct drm_i915_private *dev_priv =
3741 to_i915(plane_state->uapi.plane->dev);
3742 const struct drm_framebuffer *fb = plane_state->hw.fb;
3743 unsigned int rotation = plane_state->hw.rotation;
3746 dspcntr = DISPLAY_PLANE_ENABLE;
3748 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3749 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3750 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
/* Map the drm fourcc to the hw pixel format field. */
3752 switch (fb->format->format) {
3754 dspcntr |= DISPPLANE_8BPP;
3756 case DRM_FORMAT_XRGB1555:
3757 dspcntr |= DISPPLANE_BGRX555;
3759 case DRM_FORMAT_ARGB1555:
3760 dspcntr |= DISPPLANE_BGRA555;
3762 case DRM_FORMAT_RGB565:
3763 dspcntr |= DISPPLANE_BGRX565;
3765 case DRM_FORMAT_XRGB8888:
3766 dspcntr |= DISPPLANE_BGRX888;
3768 case DRM_FORMAT_XBGR8888:
3769 dspcntr |= DISPPLANE_RGBX888;
3771 case DRM_FORMAT_ARGB8888:
3772 dspcntr |= DISPPLANE_BGRA888;
3774 case DRM_FORMAT_ABGR8888:
3775 dspcntr |= DISPPLANE_RGBA888;
3777 case DRM_FORMAT_XRGB2101010:
3778 dspcntr |= DISPPLANE_BGRX101010;
3780 case DRM_FORMAT_XBGR2101010:
3781 dspcntr |= DISPPLANE_RGBX101010;
3783 case DRM_FORMAT_ARGB2101010:
3784 dspcntr |= DISPPLANE_BGRA101010;
3786 case DRM_FORMAT_ABGR2101010:
3787 dspcntr |= DISPPLANE_RGBA101010;
3789 case DRM_FORMAT_XBGR16161616F:
3790 dspcntr |= DISPPLANE_RGBX161616;
/* Unhandled format: should have been rejected at check time. */
3793 MISSING_CASE(fb->format->format);
3797 if (INTEL_GEN(dev_priv) >= 4 &&
3798 fb->modifier == I915_FORMAT_MOD_X_TILED)
3799 dspcntr |= DISPPLANE_TILED;
3801 if (rotation & DRM_MODE_ROTATE_180)
3802 dspcntr |= DISPPLANE_ROTATE_180;
3804 if (rotation & DRM_MODE_REFLECT_X)
3805 dspcntr |= DISPPLANE_MIRROR;
/*
 * Pre-skl plane surface check: compute the GTT mapping, surface offset
 * and x/y for the primary plane, and adjust the coordinates for
 * 180-rotation / X-mirror on platforms that don't do it in hardware.
 */
3810 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3812 struct drm_i915_private *dev_priv =
3813 to_i915(plane_state->uapi.plane->dev);
3814 const struct drm_framebuffer *fb = plane_state->hw.fb;
3815 int src_x, src_y, src_w;
3819 ret = intel_plane_compute_gtt(plane_state);
3823 if (!plane_state->uapi.visible)
/* src coords are 16.16 fixed point. */
3826 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3827 src_x = plane_state->uapi.src.x1 >> 16;
3828 src_y = plane_state->uapi.src.y1 >> 16;
3830 /* Undocumented hardware limit on i965/g4x/vlv/chv */
3831 if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
3834 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
/* gen4+ programs an aligned surface address; older uses a linear offset. */
3836 if (INTEL_GEN(dev_priv) >= 4)
3837 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3843 * Put the final coordinates back so that the src
3844 * coordinate checks will see the right values.
3846 drm_rect_translate_to(&plane_state->uapi.src,
3847 src_x << 16, src_y << 16);
3849 /* HSW/BDW do this automagically in hardware */
3850 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3851 unsigned int rotation = plane_state->hw.rotation;
3852 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3853 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
3855 if (rotation & DRM_MODE_ROTATE_180) {
3858 } else if (rotation & DRM_MODE_REFLECT_X) {
3863 plane_state->color_plane[0].offset = offset;
3864 plane_state->color_plane[0].x = src_x;
3865 plane_state->color_plane[0].y = src_y;
/*
 * Whether this primary plane has a position/size (windowing) generator:
 * chv PLANE_B, gen4 PLANE_C, and gen2/3 planes B/C. The gen5+/g4x
 * return value is elided from this view (presumably false).
 */
3870 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
3872 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3873 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3875 if (IS_CHERRYVIEW(dev_priv))
3876 return i9xx_plane == PLANE_B;
3877 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3879 else if (IS_GEN(dev_priv, 4))
3880 return i9xx_plane == PLANE_C;
3882 return i9xx_plane == PLANE_B ||
3883 i9xx_plane == PLANE_C;
/*
 * Atomic check for pre-skl primary planes: rotation restrictions,
 * no-scaling clip check (windowing only where the hw supports it),
 * surface offset computation, then DSPCNTR precomputation.
 */
3887 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3888 struct intel_plane_state *plane_state)
3890 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3893 ret = chv_plane_check_rotation(plane_state);
3897 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
3899 DRM_PLANE_HELPER_NO_SCALING,
3900 DRM_PLANE_HELPER_NO_SCALING,
3901 i9xx_plane_has_windowing(plane),
3906 ret = i9xx_check_plane_surface(plane_state);
3910 if (!plane_state->uapi.visible)
3913 ret = intel_plane_check_src_coordinates(plane_state);
/* Precompute the plane-state-dependent control bits for commit time. */
3917 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
/*
 * Commit-time programming of a pre-skl primary plane. All register writes
 * happen under the uncore lock with _FW accessors; the control register is
 * written immediately before the surface register because the latter arms
 * the update (see comment below).
 */
3922 static void i9xx_update_plane(struct intel_plane *plane,
3923 const struct intel_crtc_state *crtc_state,
3924 const struct intel_plane_state *plane_state)
3926 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3927 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3929 int x = plane_state->color_plane[0].x;
3930 int y = plane_state->color_plane[0].y;
3931 int crtc_x = plane_state->uapi.dst.x1;
3932 int crtc_y = plane_state->uapi.dst.y1;
3933 int crtc_w = drm_rect_width(&plane_state->uapi.dst);
3934 int crtc_h = drm_rect_height(&plane_state->uapi.dst);
3935 unsigned long irqflags;
/* Combine the precomputed plane bits with the crtc-dependent bits. */
3939 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
3941 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
/* gen4+ uses the tiled/aligned offset; older hw takes a linear offset. */
3943 if (INTEL_GEN(dev_priv) >= 4)
3944 dspaddr_offset = plane_state->color_plane[0].offset;
3946 dspaddr_offset = linear_offset;
3948 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3950 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3952 if (INTEL_GEN(dev_priv) < 4) {
3954 * PLANE_A doesn't actually have a full window
3955 * generator but let's assume we still need to
3956 * program whatever is there.
3958 I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
3959 I915_WRITE_FW(DSPSIZE(i9xx_plane),
3960 ((crtc_h - 1) << 16) | (crtc_w - 1));
3961 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
3962 I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
3963 I915_WRITE_FW(PRIMSIZE(i9xx_plane),
3964 ((crtc_h - 1) << 16) | (crtc_w - 1));
3965 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
3968 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3969 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3970 } else if (INTEL_GEN(dev_priv) >= 4) {
3971 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3972 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3976 * The control register self-arms if the plane was previously
3977 * disabled. Try to make the plane enable atomic by writing
3978 * the control register just before the surface register.
3980 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3981 if (INTEL_GEN(dev_priv) >= 4)
3982 I915_WRITE_FW(DSPSURF(i9xx_plane),
3983 intel_plane_ggtt_offset(plane_state) +
3986 I915_WRITE_FW(DSPADDR(i9xx_plane),
3987 intel_plane_ggtt_offset(plane_state) +
3990 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/*
 * Disable a pre-skl primary plane. DSPCNTR is still written with the
 * crtc-dependent bits (gamma/csc) because they affect the pipe bottom
 * color even with the plane off; the surface/address write arms it.
 */
3993 static void i9xx_disable_plane(struct intel_plane *plane,
3994 const struct intel_crtc_state *crtc_state)
3996 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3997 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3998 unsigned long irqflags;
4002 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
4003 * enable on ilk+ affect the pipe bottom color as
4004 * well, so we must configure them even if the plane
4007 * On pre-g4x there is no way to gamma correct the
4008 * pipe bottom color but we'll keep on doing this
4009 * anyway so that the crtc state readout works correctly.
4011 dspcntr = i9xx_plane_ctl_crtc(crtc_state);
4013 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4015 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
4016 if (INTEL_GEN(dev_priv) >= 4)
4017 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
4019 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
4021 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/*
 * Read back whether the plane is enabled in hardware and which pipe it is
 * attached to (pre-ilk planes carry the pipe select in DSPCNTR). Takes a
 * display power wakeref only if the pipe's power well is already on.
 */
4024 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
4027 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4028 enum intel_display_power_domain power_domain;
4029 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4030 intel_wakeref_t wakeref;
4035 * Not 100% correct for planes that can move between pipes,
4036 * but that's only the case for gen2-4 which don't have any
4037 * display power wells.
4039 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
4040 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4044 val = I915_READ(DSPCNTR(i9xx_plane));
4046 ret = val & DISPLAY_PLANE_ENABLE;
4048 if (INTEL_GEN(dev_priv) >= 5)
4049 *pipe = plane->pipe;
4051 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
4052 DISPPLANE_SEL_PIPE_SHIFT;
4054 intel_display_power_put(dev_priv, power_domain, wakeref);
/* Unbind one pipe scaler by zeroing its control, position and size regs. */
4059 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4061 struct drm_device *dev = intel_crtc->base.dev;
4062 struct drm_i915_private *dev_priv = to_i915(dev);
4064 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4065 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4066 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4070 * This function detaches (aka. unbinds) unused scalers in hardware
4072 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4075 const struct intel_crtc_scaler_state *scaler_state =
4076 &crtc_state->scaler_state;
4079 /* loop through and disable scalers that aren't in use */
4080 for (i = 0; i < intel_crtc->num_scalers; i++) {
4081 if (!scaler_state->scalers[i].in_use)
4082 skl_detach_scaler(intel_crtc, i);
/*
 * Units divisor for the hw stride field: bytes-per-chunk for linear,
 * tile height for 90/270 rotation (stride is in tiles of the rotated
 * view), otherwise tile width in bytes.
 */
4086 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
4087 int color_plane, unsigned int rotation)
4090 * The stride is either expressed as a multiple of 64 bytes chunks for
4091 * linear buffers or in number of tiles for tiled buffers.
4093 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
4095 else if (drm_rotation_90_or_270(rotation))
4096 return intel_tile_height(fb, color_plane);
4098 return intel_tile_width_bytes(fb, color_plane);
/*
 * Convert the byte stride of the given color plane into the units the
 * skl+ PLANE_STRIDE register expects (chunks or tiles). Planes beyond
 * what the format provides are handled by the early check below.
 */
4101 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4104 const struct drm_framebuffer *fb = plane_state->hw.fb;
4105 unsigned int rotation = plane_state->hw.rotation;
4106 u32 stride = plane_state->color_plane[color_plane].stride;
4108 if (color_plane >= fb->format->num_planes)
4111 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
/*
 * Translate a drm fourcc into the skl+ PLANE_CTL format (and RGBX byte
 * order) bits. Unknown formats hit MISSING_CASE — they should have been
 * filtered out by the plane's format list.
 */
4114 static u32 skl_plane_ctl_format(u32 pixel_format)
4116 switch (pixel_format) {
4118 return PLANE_CTL_FORMAT_INDEXED;
4119 case DRM_FORMAT_RGB565:
4120 return PLANE_CTL_FORMAT_RGB_565;
4121 case DRM_FORMAT_XBGR8888:
4122 case DRM_FORMAT_ABGR8888:
4123 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
4124 case DRM_FORMAT_XRGB8888:
4125 case DRM_FORMAT_ARGB8888:
4126 return PLANE_CTL_FORMAT_XRGB_8888;
4127 case DRM_FORMAT_XBGR2101010:
4128 case DRM_FORMAT_ABGR2101010:
4129 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
4130 case DRM_FORMAT_XRGB2101010:
4131 case DRM_FORMAT_ARGB2101010:
4132 return PLANE_CTL_FORMAT_XRGB_2101010;
4133 case DRM_FORMAT_XBGR16161616F:
4134 case DRM_FORMAT_ABGR16161616F:
4135 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
4136 case DRM_FORMAT_XRGB16161616F:
4137 case DRM_FORMAT_ARGB16161616F:
4138 return PLANE_CTL_FORMAT_XRGB_16161616F;
4139 case DRM_FORMAT_YUYV:
4140 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
4141 case DRM_FORMAT_YVYU:
4142 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
4143 case DRM_FORMAT_UYVY:
4144 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
4145 case DRM_FORMAT_VYUY:
4146 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
4147 case DRM_FORMAT_NV12:
4148 return PLANE_CTL_FORMAT_NV12;
4149 case DRM_FORMAT_P010:
4150 return PLANE_CTL_FORMAT_P010;
4151 case DRM_FORMAT_P012:
4152 return PLANE_CTL_FORMAT_P012;
4153 case DRM_FORMAT_P016:
4154 return PLANE_CTL_FORMAT_P016;
4155 case DRM_FORMAT_Y210:
4156 return PLANE_CTL_FORMAT_Y210;
4157 case DRM_FORMAT_Y212:
4158 return PLANE_CTL_FORMAT_Y212;
4159 case DRM_FORMAT_Y216:
4160 return PLANE_CTL_FORMAT_Y216;
4161 case DRM_FORMAT_XVYU2101010:
4162 return PLANE_CTL_FORMAT_Y410;
4163 case DRM_FORMAT_XVYU12_16161616:
4164 return PLANE_CTL_FORMAT_Y412;
4165 case DRM_FORMAT_XVYU16161616:
4166 return PLANE_CTL_FORMAT_Y416;
4168 MISSING_CASE(pixel_format);
/*
 * PLANE_CTL alpha-blend mode bits (skl/bxt/kbl, pre-glk) derived from
 * the uapi pixel_blend_mode; alpha is disabled outright when the format
 * has no alpha channel.
 */
4174 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4176 if (!plane_state->hw.fb->format->has_alpha)
4177 return PLANE_CTL_ALPHA_DISABLE;
4179 switch (plane_state->hw.pixel_blend_mode) {
4180 case DRM_MODE_BLEND_PIXEL_NONE:
4181 return PLANE_CTL_ALPHA_DISABLE;
4182 case DRM_MODE_BLEND_PREMULTI:
4183 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4184 case DRM_MODE_BLEND_COVERAGE:
4185 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4187 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4188 return PLANE_CTL_ALPHA_DISABLE;
/*
 * glk+ equivalent of skl_plane_ctl_alpha(): same blend-mode mapping, but
 * the bits live in PLANE_COLOR_CTL instead of PLANE_CTL.
 */
4192 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4194 if (!plane_state->hw.fb->format->has_alpha)
4195 return PLANE_COLOR_ALPHA_DISABLE;
4197 switch (plane_state->hw.pixel_blend_mode) {
4198 case DRM_MODE_BLEND_PIXEL_NONE:
4199 return PLANE_COLOR_ALPHA_DISABLE;
4200 case DRM_MODE_BLEND_PREMULTI:
4201 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4202 case DRM_MODE_BLEND_COVERAGE:
4203 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4205 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4206 return PLANE_COLOR_ALPHA_DISABLE;
/*
 * Map the fb modifier to PLANE_CTL tiling bits; the CCS variants also
 * set the render-decompression enable bit.
 */
4210 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4212 switch (fb_modifier) {
4213 case DRM_FORMAT_MOD_LINEAR:
4215 case I915_FORMAT_MOD_X_TILED:
4216 return PLANE_CTL_TILED_X;
4217 case I915_FORMAT_MOD_Y_TILED:
4218 return PLANE_CTL_TILED_Y;
4219 case I915_FORMAT_MOD_Y_TILED_CCS:
4220 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4221 case I915_FORMAT_MOD_Yf_TILED:
4222 return PLANE_CTL_TILED_YF;
4223 case I915_FORMAT_MOD_Yf_TILED_CCS:
4224 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4226 MISSING_CASE(fb_modifier);
/*
 * Map DRM rotation (counter-clockwise) to PLANE_CTL rotation bits
 * (clockwise) — hence 90 and 270 are swapped, as noted below.
 */
4232 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4235 case DRM_MODE_ROTATE_0:
4238 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4239 * while i915 HW rotation is clockwise, thats why this swapping.
4241 case DRM_MODE_ROTATE_90:
4242 return PLANE_CTL_ROTATE_270;
4243 case DRM_MODE_ROTATE_180:
4244 return PLANE_CTL_ROTATE_180;
4245 case DRM_MODE_ROTATE_270:
4246 return PLANE_CTL_ROTATE_90;
4248 MISSING_CASE(rotate);
/*
 * cnl+ horizontal-flip bit; REFLECT_Y has no hw bit here and falls into
 * MISSING_CASE (it should have been rejected earlier).
 */
4254 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4259 case DRM_MODE_REFLECT_X:
4260 return PLANE_CTL_FLIP_HORIZONTAL;
4261 case DRM_MODE_REFLECT_Y:
4263 MISSING_CASE(reflect);
/*
 * Crtc-dependent PLANE_CTL bits (pipe gamma/csc). glk+ moved these to
 * PLANE_COLOR_CTL, so this contributes nothing there (early return).
 */
4269 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4271 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4274 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4277 if (crtc_state->gamma_enable)
4278 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4280 if (crtc_state->csc_enable)
4281 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
/*
 * Plane-state-dependent PLANE_CTL bits: enable, alpha/gamma/YCbCr bits on
 * pre-glk (glk+ uses PLANE_COLOR_CTL for those), format, tiling, rotation,
 * cnl+ horizontal flip and colorkey mode.
 */
4286 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4287 const struct intel_plane_state *plane_state)
4289 struct drm_i915_private *dev_priv =
4290 to_i915(plane_state->uapi.plane->dev);
4291 const struct drm_framebuffer *fb = plane_state->hw.fb;
4292 unsigned int rotation = plane_state->hw.rotation;
4293 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4296 plane_ctl = PLANE_CTL_ENABLE;
4298 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4299 plane_ctl |= skl_plane_ctl_alpha(plane_state);
4300 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4302 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4303 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4305 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4306 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4309 plane_ctl |= skl_plane_ctl_format(fb->format->format);
4310 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4311 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4313 if (INTEL_GEN(dev_priv) >= 10)
4314 plane_ctl |= cnl_plane_ctl_flip(rotation &
4315 DRM_MODE_REFLECT_MASK);
4317 if (key->flags & I915_SET_COLORKEY_DESTINATION)
4318 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4319 else if (key->flags & I915_SET_COLORKEY_SOURCE)
4320 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
/*
 * Crtc-dependent PLANE_COLOR_CTL bits (pipe gamma/csc) for glk/cnl.
 * On icl+ these pipe bits moved again, so nothing is set here.
 */
4325 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4327 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4328 u32 plane_color_ctl = 0;
4330 if (INTEL_GEN(dev_priv) >= 11)
4331 return plane_color_ctl;
4333 if (crtc_state->gamma_enable)
4334 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4336 if (crtc_state->csc_enable)
4337 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4339 return plane_color_ctl;
/*
 * Plane-state-dependent PLANE_COLOR_CTL bits for glk+: alpha blend mode,
 * plane gamma disable, and the YUV->RGB CSC mode for non-HDR planes
 * (icl HDR planes use the input CSC path instead).
 */
4342 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4343 const struct intel_plane_state *plane_state)
4345 struct drm_i915_private *dev_priv =
4346 to_i915(plane_state->uapi.plane->dev);
4347 const struct drm_framebuffer *fb = plane_state->hw.fb;
4348 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4349 u32 plane_color_ctl = 0;
4351 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4352 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4354 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4355 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4356 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4358 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4360 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4361 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4362 } else if (fb->format->is_yuv) {
/* icl HDR plane: use the per-plane input CSC for YUV conversion. */
4363 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4366 return plane_color_ctl;
/*
 * Restore a previously duplicated atomic state after GPU reset / resume:
 * re-read hw state, force a modeset recalculation on every crtc, and
 * commit the duplicated state. -EDEADLK should have been resolved by the
 * caller's acquire context, hence the WARN.
 */
4370 __intel_display_resume(struct drm_device *dev,
4371 struct drm_atomic_state *state,
4372 struct drm_modeset_acquire_ctx *ctx)
4374 struct drm_crtc_state *crtc_state;
4375 struct drm_crtc *crtc;
4378 intel_modeset_setup_hw_state(dev, ctx);
4379 intel_vga_redisable(to_i915(dev));
4385 * We've duplicated the state, pointers to the old state are invalid.
4387 * Don't attempt to use the old state until we commit the duplicated state.
4389 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
4391 * Force recalculation even if we restore
4392 * current state. With fast modeset this may not result
4393 * in a modeset when the state is compatible.
4395 crtc_state->mode_changed = true;
4398 /* ignore any reset values/BIOS leftovers in the WM registers */
4399 if (!HAS_GMCH(to_i915(dev)))
4400 to_intel_atomic_state(state)->skip_intermediate_wm = true;
4402 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4404 WARN_ON(ret == -EDEADLK);
/* True when a full GPU reset on this platform also takes down the display. */
4408 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4410 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4411 intel_has_gpu_reset(&dev_priv->gt));
/*
 * Quiesce the display before a GPU reset that clobbers it: wake any
 * waiter stuck on I915_RESET_MODESET, take all modeset locks, duplicate
 * the current atomic state for later restore, and disable all crtcs.
 * The saved state/locks are released by intel_finish_reset().
 */
4414 void intel_prepare_reset(struct drm_i915_private *dev_priv)
4416 struct drm_device *dev = &dev_priv->drm;
4417 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4418 struct drm_atomic_state *state;
4421 /* reset doesn't touch the display */
4422 if (!i915_modparams.force_reset_modeset_test &&
4423 !gpu_reset_clobbers_display(dev_priv))
4426 /* We have a modeset vs reset deadlock, defensively unbreak it. */
4427 set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
4428 smp_mb__after_atomic();
4429 wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
4431 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
4432 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
4433 intel_gt_set_wedged(&dev_priv->gt);
4437 * Need mode_config.mutex so that we don't
4438 * trample ongoing ->detect() and whatnot.
4440 mutex_lock(&dev->mode_config.mutex);
4441 drm_modeset_acquire_init(ctx, 0);
/* Retry the lock grab until we stop hitting -EDEADLK (backoff loop). */
4443 ret = drm_modeset_lock_all_ctx(dev, ctx);
4444 if (ret != -EDEADLK)
4447 drm_modeset_backoff(ctx);
4450 * Disabling the crtcs gracefully seems nicer. Also the
4451 * g33 docs say we should at least disable all the planes.
4453 state = drm_atomic_helper_duplicate_state(dev, ctx);
4454 if (IS_ERR(state)) {
4455 ret = PTR_ERR(state);
4456 DRM_ERROR("Duplicating state failed with %i\n", ret);
4460 ret = drm_atomic_helper_disable_all(dev, ctx);
4462 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
4463 drm_atomic_state_put(state);
/* Stash the state for intel_finish_reset() to restore. */
4467 dev_priv->modeset_restore_state = state;
4468 state->acquire_ctx = ctx;
/*
 * Counterpart of intel_prepare_reset(): restore the duplicated display
 * state after the GPU reset. If the reset clobbered the display, the
 * display hardware is fully re-initialized first (PPS workaround, HW init,
 * clock gating, HPD irqs). Always drops the locks and clears the
 * I915_RESET_MODESET bit taken in prepare.
 */
4471 void intel_finish_reset(struct drm_i915_private *dev_priv)
4473 struct drm_device *dev = &dev_priv->drm;
4474 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4475 struct drm_atomic_state *state;
4478 /* reset doesn't touch the display */
4479 if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
4482 state = fetch_and_zero(&dev_priv->modeset_restore_state);
4486 /* reset doesn't touch the display */
4487 if (!gpu_reset_clobbers_display(dev_priv)) {
4488 /* for testing only restore the display */
4489 ret = __intel_display_resume(dev, state, ctx);
4491 DRM_ERROR("Restoring old state failed with %i\n", ret);
4494 * The display has been reset as well,
4495 * so need a full re-initialization.
4497 intel_pps_unlock_regs_wa(dev_priv);
4498 intel_modeset_init_hw(dev_priv);
4499 intel_init_clock_gating(dev_priv);
4501 spin_lock_irq(&dev_priv->irq_lock);
4502 if (dev_priv->display.hpd_irq_setup)
4503 dev_priv->display.hpd_irq_setup(dev_priv);
4504 spin_unlock_irq(&dev_priv->irq_lock);
4506 ret = __intel_display_resume(dev, state, ctx);
4508 DRM_ERROR("Restoring old state failed with %i\n", ret);
4510 intel_hpd_init(dev_priv);
4513 drm_atomic_state_put(state);
4515 drm_modeset_drop_locks(ctx);
4516 drm_modeset_acquire_fini(ctx);
4517 mutex_unlock(&dev->mode_config.mutex);
/* Pairs with set_bit() in intel_prepare_reset(). */
4519 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
/*
 * Apply icl PIPE_CHICKEN workarounds (WA #1153 and #1605353570) with a
 * read-modify-write so other chicken bits are preserved.
 */
4522 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4524 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4525 enum pipe pipe = crtc->pipe;
4528 tmp = I915_READ(PIPE_CHICKEN(pipe));
4531 * Display WA #1153: icl
4532 * enable hardware to bypass the alpha math
4533 * and rounding for per-pixel values 00 and 0xff
4535 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4537 * Display WA # 1605353570: icl
4538 * Set the pixel rounding bit to 1 for allowing
4539 * passthrough of Frame buffer pixels unmodified
4542 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4543 I915_WRITE(PIPE_CHICKEN(pipe), tmp);
/*
 * Enable Transcoder Port Sync on a slave transcoder: program the master
 * select field and the enable bit in TRANS_DDI_FUNC_CTL2. No-op when this
 * crtc has no master (i.e. it is not a port-sync slave). Master selects
 * are offset by 1 because 0 means EDP in the hw encoding; the EDP branch
 * value is elided from this view.
 */
4546 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
4548 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4549 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4550 u32 trans_ddi_func_ctl2_val;
4554 * Configure the master select and enable Transcoder Port Sync for
4555 * Slave CRTCs transcoder.
4557 if (crtc_state->master_transcoder == INVALID_TRANSCODER)
4560 if (crtc_state->master_transcoder == TRANSCODER_EDP)
4563 master_select = crtc_state->master_transcoder + 1;
4565 /* Set the master select bits for Tranascoder Port Sync */
4566 trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
4567 PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
4568 PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
4569 /* Enable Transcoder Port Sync */
4570 trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
4572 I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
4573 trans_ddi_func_ctl2_val);
/*
 * Disable Transcoder Port Sync on a slave transcoder by clearing the
 * enable and master-select bits in TRANS_DDI_FUNC_CTL2. No-op when the
 * old state was not a port-sync slave.
 * NOTE(review): the register value is built as ~(mask) rather than
 * read-modify-write — this clobbers any other bits in the register;
 * confirm that is intended.
 */
4576 static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
4578 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4579 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4581 u32 trans_ddi_func_ctl2_val;
4583 if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
4586 DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
4587 transcoder_name(old_crtc_state->cpu_transcoder));
4589 reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
4590 trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
4591 PORT_SYNC_MODE_MASTER_SELECT_MASK);
4592 I915_WRITE(reg, trans_ddi_func_ctl2_val);
/*
 * Switch the FDI link from training patterns to the normal pixel stream:
 * program "train none" + enhanced framing on both TX (CPU) and RX (PCH),
 * wait one idle-pattern time, and enable error correction on IVB.
 */
4595 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4597 struct drm_device *dev = crtc->base.dev;
4598 struct drm_i915_private *dev_priv = to_i915(dev);
4599 enum pipe pipe = crtc->pipe;
4603 /* enable normal train */
4604 reg = FDI_TX_CTL(pipe);
4605 temp = I915_READ(reg);
4606 if (IS_IVYBRIDGE(dev_priv)) {
4607 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4608 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4610 temp &= ~FDI_LINK_TRAIN_NONE;
4611 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4613 I915_WRITE(reg, temp);
4615 reg = FDI_RX_CTL(pipe);
4616 temp = I915_READ(reg);
4617 if (HAS_PCH_CPT(dev_priv)) {
4618 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4619 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4621 temp &= ~FDI_LINK_TRAIN_NONE;
4622 temp |= FDI_LINK_TRAIN_NONE;
4624 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4626 /* wait one idle pattern time */
4630 /* IVB wants error correction enabled */
4631 if (IS_IVYBRIDGE(dev_priv))
4632 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4633 FDI_FE_ERRC_ENABLE);
4636 /* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-phase FDI training: pattern 1 until RX reports bit lock, then
 * pattern 2 until symbol lock, polling FDI_RX_IIR up to 5 times per
 * phase. Failures are logged but not propagated (void return).
 */
4637 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4638 const struct intel_crtc_state *crtc_state)
4640 struct drm_device *dev = crtc->base.dev;
4641 struct drm_i915_private *dev_priv = to_i915(dev);
4642 enum pipe pipe = crtc->pipe;
4646 /* FDI needs bits from pipe first */
4647 assert_pipe_enabled(dev_priv, pipe);
4649 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4651 reg = FDI_RX_IMR(pipe);
4652 temp = I915_READ(reg);
4653 temp &= ~FDI_RX_SYMBOL_LOCK;
4654 temp &= ~FDI_RX_BIT_LOCK;
4655 I915_WRITE(reg, temp);
4659 /* enable CPU FDI TX and PCH FDI RX */
4660 reg = FDI_TX_CTL(pipe);
4661 temp = I915_READ(reg);
4662 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4663 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4664 temp &= ~FDI_LINK_TRAIN_NONE;
4665 temp |= FDI_LINK_TRAIN_PATTERN_1;
4666 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4668 reg = FDI_RX_CTL(pipe);
4669 temp = I915_READ(reg);
4670 temp &= ~FDI_LINK_TRAIN_NONE;
4671 temp |= FDI_LINK_TRAIN_PATTERN_1;
4672 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4677 /* Ironlake workaround, enable clock pointer after FDI enable*/
4678 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4679 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4680 FDI_RX_PHASE_SYNC_POINTER_EN);
/* Poll for bit lock (training phase 1). */
4682 reg = FDI_RX_IIR(pipe);
4683 for (tries = 0; tries < 5; tries++) {
4684 temp = I915_READ(reg);
4685 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4687 if ((temp & FDI_RX_BIT_LOCK)) {
4688 DRM_DEBUG_KMS("FDI train 1 done.\n");
4689 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4694 DRM_ERROR("FDI train 1 fail!\n");
/* Switch both ends to training pattern 2. */
4697 reg = FDI_TX_CTL(pipe);
4698 temp = I915_READ(reg);
4699 temp &= ~FDI_LINK_TRAIN_NONE;
4700 temp |= FDI_LINK_TRAIN_PATTERN_2;
4701 I915_WRITE(reg, temp);
4703 reg = FDI_RX_CTL(pipe);
4704 temp = I915_READ(reg);
4705 temp &= ~FDI_LINK_TRAIN_NONE;
4706 temp |= FDI_LINK_TRAIN_PATTERN_2;
4707 I915_WRITE(reg, temp);
/* Poll for symbol lock (training phase 2). */
4712 reg = FDI_RX_IIR(pipe);
4713 for (tries = 0; tries < 5; tries++) {
4714 temp = I915_READ(reg);
4715 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4717 if (temp & FDI_RX_SYMBOL_LOCK) {
4718 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4719 DRM_DEBUG_KMS("FDI train 2 done.\n");
4724 DRM_ERROR("FDI train 2 fail!\n");
4726 DRM_DEBUG_KMS("FDI train done\n");
/*
 * Voltage-swing / pre-emphasis settings tried, in order, during SNB
 * "B step" FDI link training; also indexed (j/2) by the IVB manual
 * trainer below.
 */
4730 static const int snb_b_fdi_train_param[] = {
4731 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4732 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4733 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4734 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4737 /* The FDI link training functions for SNB/Cougarpoint. */
/*
 * FDI link training for SNB/Cougarpoint: drive training pattern 1 until
 * the RX reports bit lock, then pattern 2 until symbol lock, stepping
 * through the snb_b_fdi_train_param vswing/emphasis table on each retry.
 */
4738 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4739 const struct intel_crtc_state *crtc_state)
4741 struct drm_device *dev = crtc->base.dev;
4742 struct drm_i915_private *dev_priv = to_i915(dev);
4743 enum pipe pipe = crtc->pipe;
4747 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
4749 reg = FDI_RX_IMR(pipe);
4750 temp = I915_READ(reg);
4751 temp &= ~FDI_RX_SYMBOL_LOCK;
4752 temp &= ~FDI_RX_BIT_LOCK;
4753 I915_WRITE(reg, temp);
4758 /* enable CPU FDI TX and PCH FDI RX */
4759 reg = FDI_TX_CTL(pipe);
4760 temp = I915_READ(reg);
4761 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4762 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4763 temp &= ~FDI_LINK_TRAIN_NONE;
4764 temp |= FDI_LINK_TRAIN_PATTERN_1;
4765 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* Start from the lowest vswing/emphasis entry; retries raise it below. */
4767 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4768 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4770 I915_WRITE(FDI_RX_MISC(pipe),
4771 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* CPT PCH uses dedicated pattern bits; older PCH reuses the ILK ones. */
4773 reg = FDI_RX_CTL(pipe);
4774 temp = I915_READ(reg);
4775 if (HAS_PCH_CPT(dev_priv)) {
4776 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4777 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4779 temp &= ~FDI_LINK_TRAIN_NONE;
4780 temp |= FDI_LINK_TRAIN_PATTERN_1;
4782 I915_WRITE(reg, temp | FDI_RX_ENABLE);
/* Walk the vswing/emphasis table until the RX latches bit lock. */
4787 for (i = 0; i < 4; i++) {
4788 reg = FDI_TX_CTL(pipe);
4789 temp = I915_READ(reg);
4790 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4791 temp |= snb_b_fdi_train_param[i];
4792 I915_WRITE(reg, temp);
4797 for (retry = 0; retry < 5; retry++) {
4798 reg = FDI_RX_IIR(pipe);
4799 temp = I915_READ(reg);
4800 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4801 if (temp & FDI_RX_BIT_LOCK) {
/* NOTE(review): lock bits are acked by writing them back to IIR —
 * presumably write-1-to-clear; confirm against the PCH docs. */
4802 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4803 DRM_DEBUG_KMS("FDI train 1 done.\n");
4812 DRM_ERROR("FDI train 1 fail!\n");
/* Train 2: switch the TX to training pattern 2. */
4815 reg = FDI_TX_CTL(pipe);
4816 temp = I915_READ(reg);
4817 temp &= ~FDI_LINK_TRAIN_NONE;
4818 temp |= FDI_LINK_TRAIN_PATTERN_2;
4819 if (IS_GEN(dev_priv, 6)) {
4820 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4822 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4824 I915_WRITE(reg, temp);
4826 reg = FDI_RX_CTL(pipe);
4827 temp = I915_READ(reg);
4828 if (HAS_PCH_CPT(dev_priv)) {
4829 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4830 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4832 temp &= ~FDI_LINK_TRAIN_NONE;
4833 temp |= FDI_LINK_TRAIN_PATTERN_2;
4835 I915_WRITE(reg, temp);
/* Same vswing walk as above, now waiting for symbol lock. */
4840 for (i = 0; i < 4; i++) {
4841 reg = FDI_TX_CTL(pipe);
4842 temp = I915_READ(reg);
4843 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4844 temp |= snb_b_fdi_train_param[i];
4845 I915_WRITE(reg, temp);
4850 for (retry = 0; retry < 5; retry++) {
4851 reg = FDI_RX_IIR(pipe);
4852 temp = I915_READ(reg);
4853 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4854 if (temp & FDI_RX_SYMBOL_LOCK) {
4855 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4856 DRM_DEBUG_KMS("FDI train 2 done.\n");
4865 DRM_ERROR("FDI train 2 fail!\n");
4867 DRM_DEBUG_KMS("FDI train done.\n");
4870 /* Manual link training for Ivy Bridge A0 parts */
4871 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4872 const struct intel_crtc_state *crtc_state)
4874 struct drm_device *dev = crtc->base.dev;
4875 struct drm_i915_private *dev_priv = to_i915(dev);
4876 enum pipe pipe = crtc->pipe;
4880 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
4882 reg = FDI_RX_IMR(pipe);
4883 temp = I915_READ(reg);
4884 temp &= ~FDI_RX_SYMBOL_LOCK;
4885 temp &= ~FDI_RX_BIT_LOCK;
4886 I915_WRITE(reg, temp);
4891 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4892 I915_READ(FDI_RX_IIR(pipe)));
4894 /* Try each vswing and preemphasis setting twice before moving on */
4895 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4896 /* disable first in case we need to retry */
4897 reg = FDI_TX_CTL(pipe);
4898 temp = I915_READ(reg);
4899 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4900 temp &= ~FDI_TX_ENABLE;
4901 I915_WRITE(reg, temp);
4903 reg = FDI_RX_CTL(pipe);
4904 temp = I915_READ(reg);
4905 temp &= ~FDI_LINK_TRAIN_AUTO;
4906 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4907 temp &= ~FDI_RX_ENABLE;
4908 I915_WRITE(reg, temp);
4910 /* enable CPU FDI TX and PCH FDI RX */
4911 reg = FDI_TX_CTL(pipe);
4912 temp = I915_READ(reg);
4913 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4914 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4915 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4916 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* j/2: each table entry is attempted twice before stepping up. */
4917 temp |= snb_b_fdi_train_param[j/2];
4918 temp |= FDI_COMPOSITE_SYNC;
4919 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4921 I915_WRITE(FDI_RX_MISC(pipe),
4922 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4924 reg = FDI_RX_CTL(pipe);
4925 temp = I915_READ(reg);
4926 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4927 temp |= FDI_COMPOSITE_SYNC;
4928 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4931 udelay(1); /* should be 0.5us */
4933 for (i = 0; i < 4; i++) {
4934 reg = FDI_RX_IIR(pipe);
4935 temp = I915_READ(reg);
4936 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
/* Check both the latched value and a fresh read, in case the lock
 * bit set just after the first read. */
4938 if (temp & FDI_RX_BIT_LOCK ||
4939 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4940 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4941 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4945 udelay(1); /* should be 0.5us */
4948 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
/* Train 2 */
4953 reg = FDI_TX_CTL(pipe);
4954 temp = I915_READ(reg);
4955 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4956 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4957 I915_WRITE(reg, temp);
4959 reg = FDI_RX_CTL(pipe);
4960 temp = I915_READ(reg);
4961 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4962 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4963 I915_WRITE(reg, temp);
4966 udelay(2); /* should be 1.5us */
4968 for (i = 0; i < 4; i++) {
4969 reg = FDI_RX_IIR(pipe);
4970 temp = I915_READ(reg);
4971 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4973 if (temp & FDI_RX_SYMBOL_LOCK ||
4974 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4975 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4976 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4980 udelay(2); /* should be 1.5us */
4983 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4987 DRM_DEBUG_KMS("FDI train done.\n");
/*
 * Enable the FDI PLLs (PCH RX side first, then CPU TX side) and switch
 * the RX clock source from Rawclk to PCDclk, ahead of link training.
 */
4990 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4992 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4993 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4994 enum pipe pipe = intel_crtc->pipe;
4998 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4999 reg = FDI_RX_CTL(pipe);
5000 temp = I915_READ(reg);
5001 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
5002 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
/* Mirror the pipe's BPC setting into the FDI RX control (bits 18:16). */
5003 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5004 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
5009 /* Switch from Rawclk to PCDclk */
5010 temp = I915_READ(reg);
5011 I915_WRITE(reg, temp | FDI_PCDCLK);
5016 /* Enable CPU FDI TX PLL, always on for Ironlake */
5017 reg = FDI_TX_CTL(pipe);
5018 temp = I915_READ(reg);
5019 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
5020 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
/*
 * Tear-down counterpart of ironlake_fdi_pll_enable(): return the RX
 * clock source to Rawclk, then disable the TX and RX FDI PLLs.
 */
5027 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
5029 struct drm_device *dev = intel_crtc->base.dev;
5030 struct drm_i915_private *dev_priv = to_i915(dev);
5031 enum pipe pipe = intel_crtc->pipe;
5035 /* Switch from PCDclk to Rawclk */
5036 reg = FDI_RX_CTL(pipe);
5037 temp = I915_READ(reg);
5038 I915_WRITE(reg, temp & ~FDI_PCDCLK);
5040 /* Disable CPU FDI TX PLL */
5041 reg = FDI_TX_CTL(pipe);
5042 temp = I915_READ(reg);
5043 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
/* Finally the PCH-side RX PLL. */
5048 reg = FDI_RX_CTL(pipe);
5049 temp = I915_READ(reg);
5050 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
5052 /* Wait for the clocks to turn off. */
/*
 * Disable the FDI link: stop CPU TX and PCH RX, then leave both sides
 * parked in training pattern 1 (required state while the link is down).
 */
5057 static void ironlake_fdi_disable(struct drm_crtc *crtc)
5059 struct drm_device *dev = crtc->dev;
5060 struct drm_i915_private *dev_priv = to_i915(dev);
5061 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5062 enum pipe pipe = intel_crtc->pipe;
5066 /* disable CPU FDI tx and PCH FDI rx */
5067 reg = FDI_TX_CTL(pipe);
5068 temp = I915_READ(reg);
5069 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
5072 reg = FDI_RX_CTL(pipe);
5073 temp = I915_READ(reg);
/* Refresh the BPC field (bits 18:16) from PIPECONF while disabling. */
5074 temp &= ~(0x7 << 16);
5075 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5076 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
5081 /* Ironlake workaround, disable clock pointer after downing FDI */
5082 if (HAS_PCH_IBX(dev_priv))
5083 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR)
5085 /* still set train pattern 1 */
5086 reg = FDI_TX_CTL(pipe);
5087 temp = I915_READ(reg);
5088 temp &= ~FDI_LINK_TRAIN_NONE;
5089 temp |= FDI_LINK_TRAIN_PATTERN_1;
5090 I915_WRITE(reg, temp);
5092 reg = FDI_RX_CTL(pipe);
5093 temp = I915_READ(reg);
5094 if (HAS_PCH_CPT(dev_priv)) {
5095 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5096 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5098 temp &= ~FDI_LINK_TRAIN_NONE;
5099 temp |= FDI_LINK_TRAIN_PATTERN_1;
5101 /* BPC in FDI rx is consistent with that in PIPECONF */
5102 temp &= ~(0x07 << 16);
5103 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5104 I915_WRITE(reg, temp);
/*
 * Report whether any CRTC still has an atomic commit whose cleanup
 * (framebuffer unpin) has not completed. Only the most recent commit
 * per CRTC is peeked, under commit_lock.
 */
5110 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5112 struct drm_crtc *crtc;
5115 drm_for_each_crtc(crtc, &dev_priv->drm) {
5116 struct drm_crtc_commit *commit;
5117 spin_lock(&crtc->commit_lock);
5118 commit = list_first_entry_or_null(&crtc->commit_list,
5119 struct drm_crtc_commit, commit_entry);
/* No commit pending counts as "cleanup done" for this CRTC. */
5120 cleanup_done = commit ?
5121 try_wait_for_completion(&commit->cleanup_done) : true;
5122 spin_unlock(&crtc->commit_lock);
/* Cleanup runs from vblank work — wait one vblank to let it finish. */
5127 drm_crtc_wait_one_vblank(crtc);
/*
 * Gate the iCLKIP pixel clock output and disable the SSC modulator via
 * the SBI sideband interface (all SBI access is serialized by sb_lock).
 */
5135 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
5139 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
5141 mutex_lock(&dev_priv->sb_lock);
5143 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5144 temp |= SBI_SSCCTL_DISABLE;
5145 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5147 mutex_unlock(&dev_priv->sb_lock);
5150 /* Program iCLKIP clock to the desired frequency */
5151 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
5153 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5154 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5155 int clock = crtc_state->hw.adjusted_mode.crtc_clock;
5156 u32 divsel, phaseinc, auxdiv, phasedir = 0;
/* Disable first; reprogramming while running is not safe. */
5159 lpt_disable_iclkip(dev_priv);
5161 /* The iCLK virtual clock root frequency is in MHz,
5162 * but the adjusted_mode->crtc_clock is in KHz. To get the
5163 * divisors, it is necessary to divide one by another, so we
5164 * convert the virtual clock precision to KHz here for higher
/* Try auxdiv = 0, then 1 (an extra /2 stage) until the divisor fits. */
5167 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
5168 u32 iclk_virtual_root_freq = 172800 * 1000;
5169 u32 iclk_pi_range = 64;
5170 u32 desired_divisor;
5172 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
/* Split the divisor into the integer part (divsel) and the
 * 64-step phase-interpolator fraction (phaseinc). */
5174 divsel = (desired_divisor / iclk_pi_range) - 2;
5175 phaseinc = desired_divisor % iclk_pi_range;
5178 * Near 20MHz is a corner case which is
5179 * out of range for the 7-bit divisor
5185 /* This should not happen with any sane values */
5186 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
5187 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
5188 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
5189 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
5191 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
5198 mutex_lock(&dev_priv->sb_lock);
5200 /* Program SSCDIVINTPHASE6 */
5201 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5202 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5203 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5204 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5205 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5206 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5207 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5208 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5210 /* Program SSCAUXDIV */
5211 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5212 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5213 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5214 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5216 /* Enable modulator and associated divider */
5217 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5218 temp &= ~SBI_SSCCTL_DISABLE;
5219 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5221 mutex_unlock(&dev_priv->sb_lock);
5223 /* Wait for initialization time */
5226 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
/*
 * Read back the current iCLKIP frequency in kHz — the inverse of
 * lpt_program_iclkip(). Returns 0 (via the elided early exits) when the
 * output is gated or the SSC modulator is disabled.
 */
5229 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5231 u32 divsel, phaseinc, auxdiv;
5232 u32 iclk_virtual_root_freq = 172800 * 1000;
5233 u32 iclk_pi_range = 64;
5234 u32 desired_divisor;
5237 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5240 mutex_lock(&dev_priv->sb_lock);
5242 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5243 if (temp & SBI_SSCCTL_DISABLE) {
5244 mutex_unlock(&dev_priv->sb_lock);
5248 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5249 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5250 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5251 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5252 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5254 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5255 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5256 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5258 mutex_unlock(&dev_priv->sb_lock);
/* Recombine integer + fractional parts; auxdiv adds a final /2 stage. */
5260 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5262 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5263 desired_divisor << auxdiv);
/*
 * Copy the CPU transcoder's H/V timing registers verbatim into the PCH
 * transcoder — the two must be programmed with identical timings.
 */
5266 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5267 enum pipe pch_transcoder)
5269 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5270 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5271 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5273 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
5274 I915_READ(HTOTAL(cpu_transcoder)));
5275 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
5276 I915_READ(HBLANK(cpu_transcoder)));
5277 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
5278 I915_READ(HSYNC(cpu_transcoder)));
5280 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
5281 I915_READ(VTOTAL(cpu_transcoder)));
5282 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
5283 I915_READ(VBLANK(cpu_transcoder)));
5284 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
5285 I915_READ(VSYNC(cpu_transcoder)));
5286 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5287 I915_READ(VSYNCSHIFT(cpu_transcoder)));
/*
 * Toggle FDI B/C lane bifurcation in SOUTH_CHICKEN1. No-op when the
 * bit already matches @enable; may only be changed while FDI RX on
 * pipes B and C is disabled (hence the WARN_ONs).
 */
5290 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5294 temp = I915_READ(SOUTH_CHICKEN1);
5295 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5298 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5299 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5301 temp &= ~FDI_BC_BIFURCATION_SELECT;
5303 temp |= FDI_BC_BIFURCATION_SELECT;
5305 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
5306 I915_WRITE(SOUTH_CHICKEN1, temp);
/* Posting read flushes the write before callers proceed. */
5307 POSTING_READ(SOUTH_CHICKEN1);
/*
 * Decide per-pipe whether FDI B/C bifurcation is needed: pipe B gives
 * its extra lanes back only when it uses more than 2, pipe C always
 * requires bifurcation.
 */
5310 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5312 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5313 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5315 switch (crtc->pipe) {
5319 if (crtc_state->fdi_lanes > 2)
5320 cpt_set_fdi_bc_bifurcation(dev_priv, false);
5322 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5326 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5335 * Finds the encoder associated with the given CRTC. This can only be
5336 * used when we know that the CRTC isn't feeding multiple encoders!
5338 static struct intel_encoder *
5339 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5340 const struct intel_crtc_state *crtc_state)
5342 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5343 const struct drm_connector_state *connector_state;
5344 const struct drm_connector *connector;
5345 struct intel_encoder *encoder = NULL;
5346 int num_encoders = 0;
/* Scan the new connector states for the one bound to this CRTC. */
5349 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5350 if (connector_state->crtc != &crtc->base)
5353 encoder = to_intel_encoder(connector_state->best_encoder);
/* Caller contract: exactly one encoder must feed this CRTC. */
5357 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5358 num_encoders, pipe_name(crtc->pipe));
5364 * Enable PCH resources required for PCH ports:
5366 * - FDI training & RX/TX
5367 * - update transcoder timings
5368 * - DP transcoding bits
5371 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5372 const struct intel_crtc_state *crtc_state)
5374 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5375 struct drm_device *dev = crtc->base.dev;
5376 struct drm_i915_private *dev_priv = to_i915(dev);
5377 enum pipe pipe = crtc->pipe;
5380 assert_pch_transcoder_disabled(dev_priv, pipe);
5382 if (IS_IVYBRIDGE(dev_priv))
5383 ivybridge_update_fdi_bc_bifurcation(crtc_state);
5385 /* Write the TU size bits before fdi link training, so that error
5386 * detection works. */
5387 I915_WRITE(FDI_RX_TUSIZE1(pipe),
5388 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5390 /* For PCH output, training FDI link */
/* Platform-specific trainer installed at init (ilk/gen6/ivb variants). */
5391 dev_priv->display.fdi_link_train(crtc, crtc_state);
5393 /* We need to program the right clock selection before writing the pixel
5394 * multiplier into the DPLL. */
5395 if (HAS_PCH_CPT(dev_priv)) {
5398 temp = I915_READ(PCH_DPLL_SEL);
5399 temp |= TRANS_DPLL_ENABLE(pipe);
5400 sel = TRANS_DPLLB_SEL(pipe);
5401 if (crtc_state->shared_dpll ==
5402 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5406 I915_WRITE(PCH_DPLL_SEL, temp);
5409 /* XXX: pch pll's can be enabled any time before we enable the PCH
5410 * transcoder, and we actually should do this to not upset any PCH
5411 * transcoder that already use the clock when we share it.
5413 * Note that enable_shared_dpll tries to do the right thing, but
5414 * get_shared_dpll unconditionally resets the pll - we need that to have
5415 * the right LVDS enable sequence. */
5416 intel_enable_shared_dpll(crtc_state);
5418 /* set transcoder timing, panel must allow it */
5419 assert_panel_unlocked(dev_priv, pipe);
5420 ironlake_pch_transcoder_set_timings(crtc_state, pipe)
5422 intel_fdi_normal_train(crtc);
5424 /* For PCH DP, enable TRANS_DP_CTL */
5425 if (HAS_PCH_CPT(dev_priv) &&
5426 intel_crtc_has_dp_encoder(crtc_state)) {
5427 const struct drm_display_mode *adjusted_mode =
5428 &crtc_state->hw.adjusted_mode;
/* PIPECONF BPC field starts at bit 5; extract the raw 3-bit value. */
5429 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5430 i915_reg_t reg = TRANS_DP_CTL(pipe);
5433 temp = I915_READ(reg);
5434 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5435 TRANS_DP_SYNC_MASK |
5437 temp |= TRANS_DP_OUTPUT_ENABLE;
5438 temp |= bpc << 9; /* same format but at 11:9 */
5440 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5441 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5442 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5443 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5445 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5446 WARN_ON(port < PORT_B || port > PORT_D);
5447 temp |= TRANS_DP_PORT_SEL(port);
5449 I915_WRITE(reg, temp);
5452 ironlake_enable_pch_transcoder(crtc_state);
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the
 * timings (LPT only has PCH transcoder A), then enable the transcoder.
 */
5455 static void lpt_pch_enable(const struct intel_atomic_state *state,
5456 const struct intel_crtc_state *crtc_state)
5458 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5459 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5460 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5462 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5464 lpt_program_iclkip(crtc_state);
5466 /* Set transcoder timing. */
5467 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
5469 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
/*
 * Sanity-check that the pipe is actually running after a modeset by
 * waiting for the scanline counter (PIPEDSL) to move; a stuck counter
 * after two 5ms waits means the pipe never started.
 */
5472 static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
5474 struct drm_i915_private *dev_priv = to_i915(dev);
5475 i915_reg_t dslreg = PIPEDSL(pipe);
5478 temp = I915_READ(dslreg);
5480 if (wait_for(I915_READ(dslreg) != temp, 5)) {
5481 if (wait_for(I915_READ(dslreg) != temp, 5))
5482 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5487 * The hardware phase 0.0 refers to the center of the pixel.
5488 * We want to start from the top/left edge which is phase
5489 * -0.5. That matches how the hardware calculates the scaling
5490 * factors (from top-left of the first pixel to bottom-right
5491 * of the last pixel, as opposed to the pixel centers).
5493 * For 4:2:0 subsampled chroma planes we obviously have to
5494 * adjust that so that the chroma sample position lands in
5497 * Note that for packed YCbCr 4:2:2 formats there is no way to
5498 * control chroma siting. The hardware simply replicates the
5499 * chroma samples for both of the luma samples, and thus we don't
5500 * actually get the expected MPEG2 chroma siting convention :(
5501 * The same behaviour is observed on pre-SKL platforms as well.
5503 * Theory behind the formula (note that we ignore sub-pixel
5504 * source coordinates):
5505 * s = source sample position
5506 * d = destination sample position
5511 * | | 1.5 (initial phase)
5519 * | -0.375 (initial phase)
/*
 * Compute the scaler initial-phase register value (see the long theory
 * comment above). Phase is in .16 fixed point: -0x8000 is the -0.5
 * top/left edge; @sub is the subsampling factor, @scale the .16 scale
 * factor, @chroma_cosited selects the cosited-chroma adjustment.
 */
5526 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5528 int phase = -0x8000;
5532 phase += (sub - 1) * 0x8000 / sub;
5534 phase += scale / (2 * sub);
5537 * Hardware initial phase limited to [-0.5:1.5].
5538 * Since the max hardware scale factor is 3.0, we
5539 * should never actually exceed 1.0 here.
5541 WARN_ON(phase < -0x8000 || phase > 0x18000);
/* Register encodes phase with a +1.0 bias before packing. */
5544 phase = 0x10000 + phase;
5546 trip = PS_PHASE_TRIP;
5548 return ((phase >> 2) & PS_PHASE_MASK) | trip;
/*
 * Min/max source and destination sizes accepted by the pipe scaler,
 * enforced in skl_update_scaler(). ICL raised the maximum width to
 * 5120; planar (semi-planar YUV 4:2:0) sources have larger minimums.
 */
5551 #define SKL_MIN_SRC_W 8
5552 #define SKL_MAX_SRC_W 4096
5553 #define SKL_MIN_SRC_H 8
5554 #define SKL_MAX_SRC_H 4096
5555 #define SKL_MIN_DST_W 8
5556 #define SKL_MAX_DST_W 4096
5557 #define SKL_MIN_DST_H 8
5558 #define SKL_MAX_DST_H 4096
5559 #define ICL_MAX_SRC_W 5120
5560 #define ICL_MAX_SRC_H 4096
5561 #define ICL_MAX_DST_W 5120
5562 #define ICL_MAX_DST_H 4096
5563 #define SKL_MIN_YUV_420_SRC_W 16
5564 #define SKL_MIN_YUV_420_SRC_H 16
/*
 * Common scaler-allocation bookkeeping for both CRTC (panel fitter) and
 * plane scaling: validates the requested src/dst sizes against the
 * platform limits above and stages scaler_users / scalers[].in_use in
 * the crtc_state. Actual register programming happens later during
 * plane / panel-fitter programming.
 */
5567 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5568 unsigned int scaler_user, int *scaler_id,
5569 int src_w, int src_h, int dst_w, int dst_h,
5570 const struct drm_format_info *format, bool need_scaler)
5572 struct intel_crtc_scaler_state *scaler_state =
5573 &crtc_state->scaler_state;
5574 struct intel_crtc *intel_crtc =
5575 to_intel_crtc(crtc_state->uapi.crtc);
5576 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5577 const struct drm_display_mode *adjusted_mode =
5578 &crtc_state->hw.adjusted_mode;
5581 * Src coordinates are already rotated by 270 degrees for
5582 * the 90/270 degree plane rotation cases (to match the
5583 * GTT mapping), hence no need to account for rotation here.
/* Any size mismatch implies scaling is required. */
5585 if (src_w != dst_w || src_h != dst_h)
5589 * Scaling/fitting not supported in IF-ID mode in GEN9+
5590 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5591 * Once NV12 is enabled, handle it here while allocating scaler
5594 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
5595 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5596 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5601 * if plane is being disabled or scaler is no more required or force detach
5602 * - free scaler binded to this plane/crtc
5603 * - in order to do this, update crtc->scaler_usage
5605 * Here scaler state in crtc_state is set free so that
5606 * scaler can be assigned to other user. Actual register
5607 * update to free the scaler is done in plane/panel-fit programming.
5608 * For this purpose crtc/plane_state->scaler_id isn't reset here.
5610 if (force_detach || !need_scaler) {
5611 if (*scaler_id >= 0) {
5612 scaler_state->scaler_users &= ~(1 << scaler_user);
5613 scaler_state->scalers[*scaler_id].in_use = 0;
5615 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5616 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5617 intel_crtc->pipe, scaler_user, *scaler_id,
5618 scaler_state->scaler_users);
/* Semi-planar YUV sources have a larger minimum size. */
5624 if (format && drm_format_info_is_yuv_semiplanar(format) &&
5625 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5626 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
/* Range check against the SKL (gen < 11) or ICL (gen >= 11) limits. */
5631 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5632 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5633 (INTEL_GEN(dev_priv) >= 11 &&
5634 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5635 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5636 (INTEL_GEN(dev_priv) < 11 &&
5637 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5638 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5639 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5640 "size is out of scaler range\n",
5641 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5645 /* mark this plane as a scaler user in crtc_state */
5646 scaler_state->scaler_users |= (1 << scaler_user);
5647 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5648 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5649 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5650 scaler_state->scaler_users);
5656 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5658 * @state: crtc's scaler state
5661 * 0 - scaler_usage updated successfully
5662 * error - requested scaling cannot be supported or other error condition
5664 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5666 const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
5667 bool need_scaler = false;
/* YCbCr 4:2:0 output always needs a scaler for chroma downsampling —
 * the assignment lives in the (elided) body of this if. */
5669 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5672 return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
5673 &state->scaler_state.scaler_id,
5674 state->pipe_src_w, state->pipe_src_h,
5675 adjusted_mode->crtc_hdisplay,
5676 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5680 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5681 * @crtc_state: crtc's scaler state
5682 * @plane_state: atomic plane state to update
5685 * 0 - scaler_usage updated successfully
5686 * error - requested scaling cannot be supported or other error condition
5688 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5689 struct intel_plane_state *plane_state)
5691 struct intel_plane *intel_plane =
5692 to_intel_plane(plane_state->uapi.plane);
5693 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5694 struct drm_framebuffer *fb = plane_state->hw.fb;
/* Invisible or fb-less planes release any scaler they held. */
5696 bool force_detach = !fb || !plane_state->uapi.visible;
5697 bool need_scaler = false;
5699 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5700 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5701 fb && drm_format_info_is_yuv_semiplanar(fb->format))
/* src rects are 16.16 fixed point; shift down to integer pixels. */
5704 ret = skl_update_scaler(crtc_state, force_detach,
5705 drm_plane_index(&intel_plane->base),
5706 &plane_state->scaler_id,
5707 drm_rect_width(&plane_state->uapi.src) >> 16,
5708 drm_rect_height(&plane_state->uapi.src) >> 16,
5709 drm_rect_width(&plane_state->uapi.dst),
5710 drm_rect_height(&plane_state->uapi.dst),
5711 fb ? fb->format : NULL, need_scaler);
5713 if (ret || plane_state->scaler_id < 0)
5716 /* check colorkey */
5717 if (plane_state->ckey.flags) {
5718 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5719 intel_plane->base.base.id,
5720 intel_plane->base.name);
5724 /* Check src format */
5725 switch (fb->format->format) {
5726 case DRM_FORMAT_RGB565:
5727 case DRM_FORMAT_XBGR8888:
5728 case DRM_FORMAT_XRGB8888:
5729 case DRM_FORMAT_ABGR8888:
5730 case DRM_FORMAT_ARGB8888:
5731 case DRM_FORMAT_XRGB2101010:
5732 case DRM_FORMAT_XBGR2101010:
5733 case DRM_FORMAT_ARGB2101010:
5734 case DRM_FORMAT_ABGR2101010:
5735 case DRM_FORMAT_YUYV:
5736 case DRM_FORMAT_YVYU:
5737 case DRM_FORMAT_UYVY:
5738 case DRM_FORMAT_VYUY:
5739 case DRM_FORMAT_NV12:
5740 case DRM_FORMAT_P010:
5741 case DRM_FORMAT_P012:
5742 case DRM_FORMAT_P016:
5743 case DRM_FORMAT_Y210:
5744 case DRM_FORMAT_Y212:
5745 case DRM_FORMAT_Y216:
5746 case DRM_FORMAT_XVYU2101010:
5747 case DRM_FORMAT_XVYU12_16161616:
5748 case DRM_FORMAT_XVYU16161616:
/* FP16 formats are scalable only on gen11+. */
5750 case DRM_FORMAT_XBGR16161616F:
5751 case DRM_FORMAT_ABGR16161616F:
5752 case DRM_FORMAT_XRGB16161616F:
5753 case DRM_FORMAT_ARGB16161616F:
5754 if (INTEL_GEN(dev_priv) >= 11)
5758 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5759 intel_plane->base.base.id, intel_plane->base.name,
5760 fb->base.id, fb->format->format);
/* Detach every scaler currently owned by this CRTC. */
5767 static void skylake_scaler_disable(struct intel_crtc *crtc)
5771 for (i = 0; i < crtc->num_scalers; i++)
5772 skl_detach_scaler(crtc, i);
/*
 * Program the SKL+ panel fitter: on these platforms the pipe scaler
 * assigned in crtc_state->scaler_state does the fitting, so this writes
 * the PS_* registers (control, phases, window pos/size) for that scaler.
 */
5775 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5777 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5778 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5779 enum pipe pipe = crtc->pipe;
5780 const struct intel_crtc_scaler_state *scaler_state =
5781 &crtc_state->scaler_state;
5783 if (crtc_state->pch_pfit.enabled) {
5784 u16 uv_rgb_hphase, uv_rgb_vphase;
5785 int pfit_w, pfit_h, hscale, vscale;
5788 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
/* pch_pfit.size packs width in the high 16 bits, height in the low. */
5791 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5792 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
/* .16 fixed-point scale factors, source over fitted size. */
5794 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5795 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5797 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5798 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5800 id = scaler_state->scaler_id;
5801 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5802 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5803 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5804 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5805 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5806 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5807 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5808 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
/*
 * Program the ILK-style panel fitter (PF_* registers). IVB/HSW need the
 * pipe selected in PF_CTL; earlier gens have one fitter per pipe.
 */
5812 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5814 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5815 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5816 enum pipe pipe = crtc->pipe;
5818 if (crtc_state->pch_pfit.enabled) {
5819 /* Force use of hard-coded filter coefficients
5820 * as some pre-programmed values are broken,
5823 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5824 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5825 PF_PIPE_SEL_IVB(pipe));
5827 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5828 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5829 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
/*
 * Enable Intermediate Pixel Storage. BDW goes through the pcode
 * mailbox; HSW writes IPS_CTL directly and waits for the bit to latch
 * on the next vblank.
 */
5833 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5835 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5836 struct drm_device *dev = crtc->base.dev;
5837 struct drm_i915_private *dev_priv = to_i915(dev);
5839 if (!crtc_state->ips_enabled)
5843 * We can only enable IPS after we enable a plane and wait for a vblank
5844 * This function is called from post_plane_update, which is run after
/* IPS needs at least one non-cursor plane active. */
5847 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5849 if (IS_BROADWELL(dev_priv)) {
5850 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5851 IPS_ENABLE | IPS_PCODE_CONTROL));
5852 /* Quoting Art Runyan: "its not safe to expect any particular
5853 * value in IPS_CTL bit 31 after enabling IPS through the
5854 * mailbox." Moreover, the mailbox may return a bogus state,
5855 * so we need to just enable it and continue on.
5858 I915_WRITE(IPS_CTL, IPS_ENABLE);
5859 /* The bit only becomes 1 in the next vblank, so this wait here
5860 * is essentially intel_wait_for_vblank. If we don't have this
5861 * and don't wait for vblanks until the end of crtc_enable, then
5862 * the HW state readout code will complain that the expected
5863 * IPS_CTL value is not the one we read. */
5864 if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
5865 DRM_ERROR("Timed out waiting for IPS enable\n");
/* Disable IPS on a HSW/BDW crtc: pcode mailbox on BDW (with an extended
 * 100ms timeout), direct IPS_CTL write on HSW, then a vblank wait before
 * the caller may disable the plane. */
5869 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5871 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5872 struct drm_device *dev = crtc->base.dev;
5873 struct drm_i915_private *dev_priv = to_i915(dev);
/* Nothing to do if IPS was not enabled in this state. */
5875 if (!crtc_state->ips_enabled)
5878 if (IS_BROADWELL(dev_priv)) {
5879 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5881 * Wait for PCODE to finish disabling IPS. The BSpec specified
5882 * 42ms timeout value leads to occasional timeouts so use 100ms
5885 if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
5886 DRM_ERROR("Timed out waiting for IPS disable\n");
/* HSW path: clear IPS_CTL directly and flush with a posting read. */
5888 I915_WRITE(IPS_CTL, 0);
5889 POSTING_READ(IPS_CTL);
5892 /* We need to wait for a vblank before we can disable the plane. */
5893 intel_wait_for_vblank(dev_priv, crtc->pipe);
/* Switch off the legacy video overlay on this crtc, if one is attached;
 * the return value of the switch-off is deliberately ignored. */
5896 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5898 if (intel_crtc->overlay)
5899 (void) intel_overlay_switch_off(intel_crtc->overlay);
5901 /* Let userspace switch the overlay on again. In most cases userspace
5902 * has to recompute where to put it anyway.
5907 * intel_post_enable_primary - Perform operations after enabling primary plane
5908 * @crtc: the CRTC whose primary plane was just enabled
5909 * @new_crtc_state: the enabling state
5911 * Performs potentially sleeping operations that must be done after the primary
5912 * plane is enabled, such as updating FBC and IPS. Note that this may be
5913 * called due to an explicit primary plane update, or due to an implicit
5914 * re-enable that is caused when a sprite plane is updated to no longer
5915 * completely hide the primary plane.
5918 intel_post_enable_primary(struct drm_crtc *crtc,
5919 const struct intel_crtc_state *new_crtc_state)
5921 struct drm_device *dev = crtc->dev;
5922 struct drm_i915_private *dev_priv = to_i915(dev);
5923 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5924 enum pipe pipe = intel_crtc->pipe;
5927 * Gen2 reports pipe underruns whenever all planes are disabled.
5928 * So don't enable underrun reporting before at least some planes
5930 * FIXME: Need to fix the logic to work when we turn off all planes
5931 * but leave the pipe running.
5933 if (IS_GEN(dev_priv, 2))
5934 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5936 /* Underruns don't always raise interrupts, so check manually. */
5937 intel_check_cpu_fifo_underruns(dev_priv);
5938 intel_check_pch_fifo_underruns(dev_priv);
5941 /* FIXME get rid of this and use pre_plane_update */
/* Non-atomic pre-disable path for the primary plane: suppress gen2 underrun
 * reporting, drop IPS, and (on GMCH platforms) disable self-refresh and wait
 * a vblank so the plane disable actually lands. */
5943 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5945 struct drm_device *dev = crtc->dev;
5946 struct drm_i915_private *dev_priv = to_i915(dev);
5947 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5948 enum pipe pipe = intel_crtc->pipe;
5951 * Gen2 reports pipe underruns whenever all planes are disabled.
5952 * So disable underrun reporting before all the planes get disabled.
5954 if (IS_GEN(dev_priv, 2))
5955 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5957 hsw_disable_ips(to_intel_crtc_state(crtc->state));
5960 * Vblank time updates from the shadow to live plane control register
5961 * are blocked if the memory self-refresh mode is active at that
5962 * moment. So to make sure the plane gets truly disabled, disable
5963 * first the self-refresh mode. The self-refresh enable bit in turn
5964 * will be checked/applied by the HW only at the next frame start
5965 * event which is after the vblank start event, so we need to have a
5966 * wait-for-vblank between disabling the plane and the pipe.
5968 if (HAS_GMCH(dev_priv) &&
5969 intel_set_memory_cxsr(dev_priv, false))
5970 intel_wait_for_vblank(dev_priv, pipe);
/* Decide whether IPS must be turned off before this plane/pipe update:
 * always across a modeset, on HSW before reprogramming a split-gamma LUT
 * (hardware workaround), or when the new state no longer wants IPS. */
5973 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5974 const struct intel_crtc_state *new_crtc_state)
5976 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5977 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* IPS was off already — nothing to disable. */
5979 if (!old_crtc_state->ips_enabled)
5982 if (needs_modeset(new_crtc_state))
5986 * Workaround : Do not read or write the pipe palette/gamma data while
5987 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5989 * Disable IPS before we program the LUT.
5991 if (IS_HASWELL(dev_priv) &&
5992 (new_crtc_state->uapi.color_mgmt_changed ||
5993 new_crtc_state->update_pipe) &&
5994 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5997 return !new_crtc_state->ips_enabled;
/* Mirror of hsw_pre_update_disable_ips(): decide whether IPS should be
 * (re-)enabled after the update — after a split-gamma LUT reprogram on HSW,
 * forcibly on the first BDW fastset (IPS state can't be read out), or when
 * IPS is newly wanted. */
6000 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6001 const struct intel_crtc_state *new_crtc_state)
6003 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6004 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* New state doesn't want IPS — nothing to enable. */
6006 if (!new_crtc_state->ips_enabled)
6009 if (needs_modeset(new_crtc_state))
6013 * Workaround : Do not read or write the pipe palette/gamma data while
6014 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6016 * Re-enable IPS after the LUT has been programmed.
6018 if (IS_HASWELL(dev_priv) &&
6019 (new_crtc_state->uapi.color_mgmt_changed ||
6020 new_crtc_state->update_pipe) &&
6021 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6025 * We can't read out IPS on broadwell, assume the worst and
6026 * forcibly enable IPS on the first fastset.
6028 if (new_crtc_state->update_pipe &&
6029 old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
6032 return !old_crtc_state->ips_enabled;
/* Display WA #0827: true when NV12 planes are active on a gen9 (non-GLK)
 * platform and the workaround must be applied. */
6035 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
6036 const struct intel_crtc_state *crtc_state)
6038 if (!crtc_state->nv12_planes)
6041 /* WA Display #0827: Gen9:all */
6042 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
/* Wa_2006604312: true when any pipe scaler is in use on ICL and the
 * scaler clock-gating workaround must be applied. */
6048 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
6049 const struct intel_crtc_state *crtc_state)
6051 /* Wa_2006604312:icl */
6052 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
/* Post-commit plane bookkeeping for one crtc: flush frontbuffer tracking,
 * update watermarks, re-enable IPS if warranted, finish FBC, re-run the
 * primary-plane post-enable path, and tear down the NV12/scaler-clock
 * workarounds that are no longer needed. */
6058 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
6060 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6061 struct drm_device *dev = crtc->base.dev;
6062 struct drm_i915_private *dev_priv = to_i915(dev);
6063 struct drm_atomic_state *state = old_crtc_state->uapi.state;
6064 struct intel_crtc_state *pipe_config =
6065 intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
6067 struct drm_plane *primary = crtc->base.primary;
6068 struct drm_plane_state *old_primary_state =
6069 drm_atomic_get_old_plane_state(state, primary);
6071 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
6073 if (pipe_config->update_wm_post && pipe_config->hw.active)
6074 intel_update_watermarks(crtc);
6076 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
6077 hsw_enable_ips(pipe_config);
6079 if (old_primary_state) {
6080 struct drm_plane_state *new_primary_state =
6081 drm_atomic_get_new_plane_state(state, primary);
6083 intel_fbc_post_update(crtc);
/* Primary plane just became visible (or a modeset occurred). */
6085 if (new_primary_state->visible &&
6086 (needs_modeset(pipe_config) ||
6087 !old_primary_state->visible))
6088 intel_post_enable_primary(&crtc->base, pipe_config);
/* Disable workarounds that the old state needed but the new one doesn't. */
6091 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
6092 !needs_nv12_wa(dev_priv, pipe_config))
6093 skl_wa_827(dev_priv, crtc->pipe, false);
6095 if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
6096 !needs_scalerclk_wa(dev_priv, pipe_config))
6097 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
/* Pre-commit counterpart of intel_post_plane_update(): disable IPS if
 * required, prepare FBC, suppress gen2 underruns, arm the NV12 and ICL
 * scaler-clock workarounds, disable self-refresh / LP watermarks with the
 * needed vblank waits, and program intermediate watermarks for non-modeset
 * updates. */
6100 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
6101 struct intel_crtc_state *pipe_config)
6103 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6104 struct drm_device *dev = crtc->base.dev;
6105 struct drm_i915_private *dev_priv = to_i915(dev);
6106 struct drm_atomic_state *state = old_crtc_state->uapi.state;
6107 struct drm_plane *primary = crtc->base.primary;
6108 struct drm_plane_state *old_primary_state =
6109 drm_atomic_get_old_plane_state(state, primary);
6110 bool modeset = needs_modeset(pipe_config);
6111 struct intel_atomic_state *intel_state =
6112 to_intel_atomic_state(state);
6114 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
6115 hsw_disable_ips(old_crtc_state);
6117 if (old_primary_state) {
6118 struct intel_plane_state *new_primary_state =
6119 intel_atomic_get_new_plane_state(intel_state,
6120 to_intel_plane(primary));
6122 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
6124 * Gen2 reports pipe underruns whenever all planes are disabled.
6125 * So disable underrun reporting before all the planes get disabled.
6127 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
6128 (modeset || !new_primary_state->uapi.visible))
6129 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
6132 /* Display WA 827 */
6133 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
6134 needs_nv12_wa(dev_priv, pipe_config))
6135 skl_wa_827(dev_priv, crtc->pipe, true);
6137 /* Wa_2006604312:icl */
6138 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
6139 needs_scalerclk_wa(dev_priv, pipe_config))
6140 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
6143 * Vblank time updates from the shadow to live plane control register
6144 * are blocked if the memory self-refresh mode is active at that
6145 * moment. So to make sure the plane gets truly disabled, disable
6146 * first the self-refresh mode. The self-refresh enable bit in turn
6147 * will be checked/applied by the HW only at the next frame start
6148 * event which is after the vblank start event, so we need to have a
6149 * wait-for-vblank between disabling the plane and the pipe.
6151 if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
6152 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
6153 intel_wait_for_vblank(dev_priv, crtc->pipe);
6156 * IVB workaround: must disable low power watermarks for at least
6157 * one frame before enabling scaling. LP watermarks can be re-enabled
6158 * when scaling is disabled.
6160 * WaCxSRDisabledForSpriteScaling:ivb
6162 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
6163 old_crtc_state->hw.active)
6164 intel_wait_for_vblank(dev_priv, crtc->pipe);
6167 * If we're doing a modeset, we're done. No need to do any pre-vblank
6168 * watermark programming here.
6170 if (needs_modeset(pipe_config))
6174 * For platforms that support atomic watermarks, program the
6175 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
6176 * will be the intermediate values that are safe for both pre- and
6177 * post- vblank; when vblank happens, the 'active' values will be set
6178 * to the final 'target' values and we'll do this again to get the
6179 * optimal watermarks. For gen9+ platforms, the values we program here
6180 * will be the final target values which will get automatically latched
6181 * at vblank time; no further programming will be necessary.
6183 * If a platform hasn't been transitioned to atomic watermarks yet,
6184 * we'll continue to update watermarks the old way, if flags tell
6187 if (dev_priv->display.initial_watermarks != NULL)
6188 dev_priv->display.initial_watermarks(intel_state,
6190 else if (pipe_config->update_wm_pre)
6191 intel_update_watermarks(crtc);
/* Disable every plane selected by the new state's update_planes mask on
 * this crtc (plus the legacy overlay), then flush frontbuffer tracking for
 * the planes that were actually visible. */
6194 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6195 struct intel_crtc *crtc)
6197 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6198 const struct intel_crtc_state *new_crtc_state =
6199 intel_atomic_get_new_crtc_state(state, crtc);
6200 unsigned int update_mask = new_crtc_state->update_planes;
6201 const struct intel_plane_state *old_plane_state;
6202 struct intel_plane *plane;
6203 unsigned fb_bits = 0;
6206 intel_crtc_dpms_overlay_disable(crtc);
6208 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
/* Skip planes on other pipes or not selected for update. */
6209 if (crtc->pipe != plane->pipe ||
6210 !(update_mask & BIT(plane->id)))
6213 intel_disable_plane(plane, new_crtc_state);
6215 if (old_plane_state->uapi.visible)
6216 fb_bits |= plane->frontbuffer_bit;
6219 intel_frontbuffer_flip(dev_priv, fb_bits);
6223 * intel_connector_primary_encoder - get the primary encoder for a connector
6224 * @connector: connector for which to return the encoder
6226 * Returns the primary encoder for a connector. There is a 1:1 mapping from
6227 * all connectors to their encoder, except for DP-MST connectors which have
6228 * both a virtual and a primary encoder. These DP-MST primary encoders can be
6229 * pointed to by as many DP-MST connectors as there are pipes.
6231 static struct intel_encoder *
6232 intel_connector_primary_encoder(struct intel_connector *connector)
6234 struct intel_encoder *encoder;
/* DP-MST: the primary encoder is the one of the underlying digital port. */
6236 if (connector->mst_port)
6237 return &dp_to_dig_port(connector->mst_port)->base;
6239 encoder = intel_attached_encoder(&connector->base);
/* A connector needs a modeset when it moves to a different crtc, or when
 * its (old or new) crtc itself needs a modeset. */
6246 intel_connector_needs_modeset(struct intel_atomic_state *state,
6247 const struct drm_connector_state *old_conn_state,
6248 const struct drm_connector_state *new_conn_state)
6250 struct intel_crtc *old_crtc = old_conn_state->crtc ?
6251 to_intel_crtc(old_conn_state->crtc) : NULL;
6252 struct intel_crtc *new_crtc = new_conn_state->crtc ?
6253 to_intel_crtc(new_conn_state->crtc) : NULL;
6255 return new_crtc != old_crtc ||
6257 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
/* Invoke ->update_prepare on the primary encoder of every connector that
 * needs a modeset in this atomic state, before the update begins. */
6260 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6262 struct drm_connector_state *old_conn_state;
6263 struct drm_connector_state *new_conn_state;
6264 struct drm_connector *conn;
6267 for_each_oldnew_connector_in_state(&state->base, conn,
6268 old_conn_state, new_conn_state, i) {
6269 struct intel_encoder *encoder;
6270 struct intel_crtc *crtc;
/* Only encoders whose connector actually changes need preparing. */
6272 if (!intel_connector_needs_modeset(state,
6277 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6278 if (!encoder->update_prepare)
6281 crtc = new_conn_state->crtc ?
6282 to_intel_crtc(new_conn_state->crtc) : NULL;
6283 encoder->update_prepare(state, encoder, crtc);
/* Counterpart of intel_encoders_update_prepare(): invoke ->update_complete
 * on the primary encoder of every connector that needed a modeset, after
 * the update is done. */
6287 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6289 struct drm_connector_state *old_conn_state;
6290 struct drm_connector_state *new_conn_state;
6291 struct drm_connector *conn;
6294 for_each_oldnew_connector_in_state(&state->base, conn,
6295 old_conn_state, new_conn_state, i) {
6296 struct intel_encoder *encoder;
6297 struct intel_crtc *crtc;
6299 if (!intel_connector_needs_modeset(state,
6304 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6305 if (!encoder->update_complete)
6308 crtc = new_conn_state->crtc ?
6309 to_intel_crtc(new_conn_state->crtc) : NULL;
6310 encoder->update_complete(state, encoder, crtc);
/* Call ->pre_pll_enable on every encoder attached to this crtc in the new
 * state, before the shared DPLL is enabled. */
6314 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6315 struct intel_crtc *crtc)
6317 const struct intel_crtc_state *crtc_state =
6318 intel_atomic_get_new_crtc_state(state, crtc);
6319 const struct drm_connector_state *conn_state;
6320 struct drm_connector *conn;
6323 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6324 struct intel_encoder *encoder =
6325 to_intel_encoder(conn_state->best_encoder);
/* Skip connectors driven by a different crtc. */
6327 if (conn_state->crtc != &crtc->base)
6330 if (encoder->pre_pll_enable)
6331 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
/* Call ->pre_enable on every encoder attached to this crtc in the new
 * state, before the pipe itself is enabled. */
6335 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6336 struct intel_crtc *crtc)
6338 const struct intel_crtc_state *crtc_state =
6339 intel_atomic_get_new_crtc_state(state, crtc);
6340 const struct drm_connector_state *conn_state;
6341 struct drm_connector *conn;
6344 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6345 struct intel_encoder *encoder =
6346 to_intel_encoder(conn_state->best_encoder);
6348 if (conn_state->crtc != &crtc->base)
6351 if (encoder->pre_enable)
6352 encoder->pre_enable(encoder, crtc_state, conn_state);
/* Call ->enable on every encoder attached to this crtc in the new state,
 * then notify the ACPI OpRegion that the encoder is active. */
6356 static void intel_encoders_enable(struct intel_atomic_state *state,
6357 struct intel_crtc *crtc)
6359 const struct intel_crtc_state *crtc_state =
6360 intel_atomic_get_new_crtc_state(state, crtc);
6361 const struct drm_connector_state *conn_state;
6362 struct drm_connector *conn;
6365 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6366 struct intel_encoder *encoder =
6367 to_intel_encoder(conn_state->best_encoder);
6369 if (conn_state->crtc != &crtc->base)
6372 if (encoder->enable)
6373 encoder->enable(encoder, crtc_state, conn_state);
6374 intel_opregion_notify_encoder(encoder, true);
/* Notify the ACPI OpRegion and call ->disable on every encoder that was
 * attached to this crtc in the old state. */
6378 static void intel_encoders_disable(struct intel_atomic_state *state,
6379 struct intel_crtc *crtc)
6381 const struct intel_crtc_state *old_crtc_state =
6382 intel_atomic_get_old_crtc_state(state, crtc);
6383 const struct drm_connector_state *old_conn_state;
6384 struct drm_connector *conn;
6387 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6388 struct intel_encoder *encoder =
6389 to_intel_encoder(old_conn_state->best_encoder);
6391 if (old_conn_state->crtc != &crtc->base)
6394 intel_opregion_notify_encoder(encoder, false);
6395 if (encoder->disable)
6396 encoder->disable(encoder, old_crtc_state, old_conn_state);
/* Call ->post_disable on every encoder that was attached to this crtc in
 * the old state, after the pipe has been disabled. */
6400 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6401 struct intel_crtc *crtc)
6403 const struct intel_crtc_state *old_crtc_state =
6404 intel_atomic_get_old_crtc_state(state, crtc);
6405 const struct drm_connector_state *old_conn_state;
6406 struct drm_connector *conn;
6409 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6410 struct intel_encoder *encoder =
6411 to_intel_encoder(old_conn_state->best_encoder);
6413 if (old_conn_state->crtc != &crtc->base)
6416 if (encoder->post_disable)
6417 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
/* Call ->post_pll_disable on every encoder that was attached to this crtc
 * in the old state, after the shared DPLL has been turned off. */
6421 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6422 struct intel_crtc *crtc)
6424 const struct intel_crtc_state *old_crtc_state =
6425 intel_atomic_get_old_crtc_state(state, crtc);
6426 const struct drm_connector_state *old_conn_state;
6427 struct drm_connector *conn;
6430 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6431 struct intel_encoder *encoder =
6432 to_intel_encoder(old_conn_state->best_encoder);
6434 if (old_conn_state->crtc != &crtc->base)
6437 if (encoder->post_pll_disable)
6438 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
/* Call ->update_pipe on every encoder attached to this crtc in the new
 * state — the fastset (non-modeset) update path. */
6442 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6443 struct intel_crtc *crtc)
6445 const struct intel_crtc_state *crtc_state =
6446 intel_atomic_get_new_crtc_state(state, crtc);
6447 const struct drm_connector_state *conn_state;
6448 struct drm_connector *conn;
6451 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6452 struct intel_encoder *encoder =
6453 to_intel_encoder(conn_state->best_encoder);
6455 if (conn_state->crtc != &crtc->base)
6458 if (encoder->update_pipe)
6459 encoder->update_pipe(encoder, crtc_state, conn_state);
/* Disable the crtc's primary plane via the plane's own disable hook; used
 * e.g. to reprogram DSPCNTR for pipe bottom color while keeping the plane
 * off. */
6463 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6465 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6466 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6468 plane->disable_plane(plane, crtc_state);
/* Full modeset enable sequence for an ILK-class crtc: suppress spurious
 * underruns, set up PLL/M-N/timings/pipeconf, run encoder pre-enable hooks,
 * bring up FDI and the panel fitter, load the LUT, enable the pipe and the
 * PCH path, then enable encoders and restore underrun reporting. */
6471 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
6472 struct intel_atomic_state *state)
6474 struct drm_crtc *crtc = pipe_config->uapi.crtc;
6475 struct drm_device *dev = crtc->dev;
6476 struct drm_i915_private *dev_priv = to_i915(dev);
6477 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6478 enum pipe pipe = intel_crtc->pipe;
/* Enabling an already-active crtc is a driver bug. */
6480 if (WARN_ON(intel_crtc->active))
6484 * Sometimes spurious CPU pipe underruns happen during FDI
6485 * training, at least with VGA+HDMI cloning. Suppress them.
6487 * On ILK we get an occasional spurious CPU pipe underruns
6488 * between eDP port A enable and vdd enable. Also PCH port
6489 * enable seems to result in the occasional CPU pipe underrun.
6491 * Spurious PCH underruns also occur during PCH enabling.
6493 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6494 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6496 if (pipe_config->has_pch_encoder)
6497 intel_prepare_shared_dpll(pipe_config);
6499 if (intel_crtc_has_dp_encoder(pipe_config))
6500 intel_dp_set_m_n(pipe_config, M1_N1);
6502 intel_set_pipe_timings(pipe_config);
6503 intel_set_pipe_src_size(pipe_config);
6505 if (pipe_config->has_pch_encoder) {
6506 intel_cpu_transcoder_set_m_n(pipe_config,
6507 &pipe_config->fdi_m_n, NULL);
6510 ironlake_set_pipeconf(pipe_config);
6512 intel_crtc->active = true;
6514 intel_encoders_pre_enable(state, intel_crtc);
6516 if (pipe_config->has_pch_encoder) {
6517 /* Note: FDI PLL enabling _must_ be done before we enable the
6518 * cpu pipes, hence this is separate from all the other fdi/pch
6520 ironlake_fdi_pll_enable(pipe_config);
/* No PCH encoder: FDI must be fully off at this point. */
6522 assert_fdi_tx_disabled(dev_priv, pipe);
6523 assert_fdi_rx_disabled(dev_priv, pipe);
6526 ironlake_pfit_enable(pipe_config);
6529 * On ILK+ LUT must be loaded before the pipe is running but with
6532 intel_color_load_luts(pipe_config);
6533 intel_color_commit(pipe_config);
6534 /* update DSPCNTR to configure gamma for pipe bottom color */
6535 intel_disable_primary_plane(pipe_config);
6537 if (dev_priv->display.initial_watermarks != NULL)
6538 dev_priv->display.initial_watermarks(state, pipe_config);
6539 intel_enable_pipe(pipe_config);
6541 if (pipe_config->has_pch_encoder)
6542 ironlake_pch_enable(state, pipe_config);
6544 assert_vblank_disabled(crtc);
6545 intel_crtc_vblank_on(pipe_config);
6547 intel_encoders_enable(state, intel_crtc);
6549 if (HAS_PCH_CPT(dev_priv))
6550 cpt_verify_modeset(dev, intel_crtc->pipe);
6553 * Must wait for vblank to avoid spurious PCH FIFO underruns.
6554 * And a second vblank wait is needed at least on ILK with
6555 * some interlaced HDMI modes. Let's do the double wait always
6556 * in case there are more corner cases we don't know about.
6558 if (pipe_config->has_pch_encoder) {
6559 intel_wait_for_vblank(dev_priv, pipe);
6560 intel_wait_for_vblank(dev_priv, pipe);
/* Underrun reporting can now be re-armed on both CPU and PCH side. */
6562 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6563 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6566 /* IPS only exists on ULT machines and is tied to pipe A. */
6567 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6569 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
/* Display WA #1180 helper: set or clear the pipe scaler clock-gating
 * disable bits in CLKGATE_DIS_PSL for this pipe.
 * NOTE(review): the read-modify-write lines applying 'apply'/'mask' to
 * 'val' appear elided in this extract — verify against the full source. */
6572 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6573 enum pipe pipe, bool apply)
6575 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6576 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6583 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
/* Program the per-pipe MBus data box credits: A=2 always, with B/BW
 * credits depending on whether the platform is gen12+ or gen11. */
6586 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6588 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6589 enum pipe pipe = crtc->pipe;
6592 val = MBUS_DBOX_A_CREDIT(2);
6594 if (INTEL_GEN(dev_priv) >= 12) {
6595 val |= MBUS_DBOX_BW_CREDIT(2);
6596 val |= MBUS_DBOX_B_CREDIT(12);
/* gen11 values (else branch). */
6598 val |= MBUS_DBOX_BW_CREDIT(1);
6599 val |= MBUS_DBOX_B_CREDIT(8);
6602 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
/* Clear the transcoder's frame-start delay (set it to 0) in the
 * CHICKEN_TRANS register for this crtc's cpu transcoder. */
6605 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6607 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6608 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6609 i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6612 val = I915_READ(reg);
6613 val &= ~HSW_FRAME_START_DELAY_MASK;
6614 val |= HSW_FRAME_START_DELAY(0);
6615 I915_WRITE(reg, val);
/* Full modeset enable sequence for a HSW+ crtc (incl. gen9/gen11 paths):
 * shared DPLL, encoder pre-enable, transcoder timings/pipeconf, panel
 * fitter or SKL scalers, LUT, DDI transcoder function, MBus credits, pipe
 * enable, LPT PCH, encoder enable, and the GLK/CNL scaler clock-gating and
 * HSW dual-vblank workarounds. */
6618 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
6619 struct intel_atomic_state *state)
6621 struct drm_crtc *crtc = pipe_config->uapi.crtc;
6622 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6623 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6624 enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
6625 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6626 bool psl_clkgate_wa;
/* Enabling an already-active crtc is a driver bug. */
6628 if (WARN_ON(intel_crtc->active))
6631 intel_encoders_pre_pll_enable(state, intel_crtc);
6633 if (pipe_config->shared_dpll)
6634 intel_enable_shared_dpll(pipe_config);
6636 intel_encoders_pre_enable(state, intel_crtc);
6638 if (intel_crtc_has_dp_encoder(pipe_config))
6639 intel_dp_set_m_n(pipe_config, M1_N1);
6641 if (!transcoder_is_dsi(cpu_transcoder))
6642 intel_set_pipe_timings(pipe_config);
6644 if (INTEL_GEN(dev_priv) >= 11)
6645 icl_enable_trans_port_sync(pipe_config);
6647 intel_set_pipe_src_size(pipe_config);
6649 if (cpu_transcoder != TRANSCODER_EDP &&
6650 !transcoder_is_dsi(cpu_transcoder)) {
6651 I915_WRITE(PIPE_MULT(cpu_transcoder),
6652 pipe_config->pixel_multiplier - 1);
6655 if (pipe_config->has_pch_encoder) {
6656 intel_cpu_transcoder_set_m_n(pipe_config,
6657 &pipe_config->fdi_m_n, NULL);
6660 if (!transcoder_is_dsi(cpu_transcoder)) {
6661 hsw_set_frame_start_delay(pipe_config);
6662 haswell_set_pipeconf(pipe_config);
6665 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6666 bdw_set_pipemisc(pipe_config);
6668 intel_crtc->active = true;
6670 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6671 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6672 pipe_config->pch_pfit.enabled;
6674 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
/* gen9+ uses the SKL scaler block; older uses the ILK panel fitter. */
6676 if (INTEL_GEN(dev_priv) >= 9)
6677 skylake_pfit_enable(pipe_config);
6679 ironlake_pfit_enable(pipe_config);
6682 * On ILK+ LUT must be loaded before the pipe is running but with
6685 intel_color_load_luts(pipe_config);
6686 intel_color_commit(pipe_config);
6687 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
6688 if (INTEL_GEN(dev_priv) < 9)
6689 intel_disable_primary_plane(pipe_config);
6691 if (INTEL_GEN(dev_priv) >= 11)
6692 icl_set_pipe_chicken(intel_crtc);
6694 if (!transcoder_is_dsi(cpu_transcoder))
6695 intel_ddi_enable_transcoder_func(pipe_config);
6697 if (dev_priv->display.initial_watermarks != NULL)
6698 dev_priv->display.initial_watermarks(state, pipe_config);
6700 if (INTEL_GEN(dev_priv) >= 11)
6701 icl_pipe_mbus_enable(intel_crtc);
6703 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6704 if (!transcoder_is_dsi(cpu_transcoder))
6705 intel_enable_pipe(pipe_config);
6707 if (pipe_config->has_pch_encoder)
6708 lpt_pch_enable(state, pipe_config);
6710 assert_vblank_disabled(crtc);
6711 intel_crtc_vblank_on(pipe_config);
6713 intel_encoders_enable(state, intel_crtc);
6715 if (psl_clkgate_wa) {
6716 intel_wait_for_vblank(dev_priv, pipe);
6717 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6720 /* If we change the relative order between pipe/planes enabling, we need
6721 * to change the workaround. */
6722 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
6723 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6724 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6725 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
/* Disable the ILK-style panel fitter by zeroing PF_CTL/PF_WIN_*, but only
 * when the old state had it enabled (touching it otherwise can upset the
 * power well on HSW). */
6729 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6731 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6732 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6733 enum pipe pipe = crtc->pipe;
6735 /* To avoid upsetting the power well on haswell only disable the pfit if
6736 * it's in use. The hw state code will make sure we get this right. */
6737 if (old_crtc_state->pch_pfit.enabled) {
6738 I915_WRITE(PF_CTL(pipe), 0);
6739 I915_WRITE(PF_WIN_POS(pipe), 0);
6740 I915_WRITE(PF_WIN_SZ(pipe), 0);
/* Full disable sequence for an ILK-class crtc: suppress underruns, disable
 * encoders, vblank, pipe, panel fitter and FDI, then tear down the PCH
 * transcoder / TRANS_DP_CTL / DPLL_SEL state and re-arm underrun
 * reporting. */
6744 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6745 struct intel_atomic_state *state)
6747 struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
6748 struct drm_device *dev = crtc->dev;
6749 struct drm_i915_private *dev_priv = to_i915(dev);
6750 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6751 enum pipe pipe = intel_crtc->pipe;
6754 * Sometimes spurious CPU pipe underruns happen when the
6755 * pipe is already disabled, but FDI RX/TX is still enabled.
6756 * Happens at least with VGA+HDMI cloning. Suppress them.
6758 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6759 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6761 intel_encoders_disable(state, intel_crtc);
6763 drm_crtc_vblank_off(crtc);
6764 assert_vblank_disabled(crtc);
6766 intel_disable_pipe(old_crtc_state);
6768 ironlake_pfit_disable(old_crtc_state);
6770 if (old_crtc_state->has_pch_encoder)
6771 ironlake_fdi_disable(crtc);
6773 intel_encoders_post_disable(state, intel_crtc);
6775 if (old_crtc_state->has_pch_encoder) {
6776 ironlake_disable_pch_transcoder(dev_priv, pipe);
6778 if (HAS_PCH_CPT(dev_priv)) {
6782 /* disable TRANS_DP_CTL */
6783 reg = TRANS_DP_CTL(pipe);
6784 temp = I915_READ(reg);
6785 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6786 TRANS_DP_PORT_SEL_MASK);
6787 temp |= TRANS_DP_PORT_SEL_NONE;
6788 I915_WRITE(reg, temp);
6790 /* disable DPLL_SEL */
6791 temp = I915_READ(PCH_DPLL_SEL);
6792 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6793 I915_WRITE(PCH_DPLL_SEL, temp);
6796 ironlake_fdi_pll_disable(intel_crtc);
/* Safe to report underruns again now that everything is off. */
6799 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6800 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
/* Full disable sequence for a HSW+ crtc: encoders off, vblank off, pipe
 * off (non-DSI), gen11 port-sync and DDI transcoder teardown, DSC off,
 * scalers/panel-fitter off, then encoder post-disable and post-PLL-disable
 * hooks. */
6803 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6804 struct intel_atomic_state *state)
6806 struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
6807 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6808 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6809 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6811 intel_encoders_disable(state, intel_crtc);
6813 drm_crtc_vblank_off(crtc);
6814 assert_vblank_disabled(crtc);
6816 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
6817 if (!transcoder_is_dsi(cpu_transcoder))
6818 intel_disable_pipe(old_crtc_state);
6820 if (INTEL_GEN(dev_priv) >= 11)
6821 icl_disable_transcoder_port_sync(old_crtc_state);
6823 if (!transcoder_is_dsi(cpu_transcoder))
6824 intel_ddi_disable_transcoder_func(old_crtc_state);
6826 intel_dsc_disable(old_crtc_state);
/* gen9+ uses SKL scalers; older uses the ILK panel fitter. */
6828 if (INTEL_GEN(dev_priv) >= 9)
6829 skylake_scaler_disable(intel_crtc);
6831 ironlake_pfit_disable(old_crtc_state);
6833 intel_encoders_post_disable(state, intel_crtc);
6835 intel_encoders_post_pll_disable(state, intel_crtc);
/* Program the GMCH panel fitter (PFIT_PGM_RATIOS/PFIT_CONTROL) for this
 * crtc. Must be done while the pipe is disabled, per the PRM; also clears
 * the border color register. No-op when the state has no pfit control. */
6838 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6840 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6841 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6843 if (!crtc_state->gmch_pfit.control)
6847 * The panel fitter should only be adjusted whilst the pipe is disabled,
6848 * according to register description and PRM.
6850 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6851 assert_pipe_disabled(dev_priv, crtc->pipe);
6853 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6854 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6856 /* Border color in case we don't scale up to the full screen. Black by
6857 * default, change to something else for debugging. */
6858 I915_WRITE(BCLRPAT(crtc->pipe), 0);
/* Return true if @phy is a combo PHY on this platform: A-C on EHL, A-B on
 * other gen11+; PHY_NONE is never a combo PHY. */
6861 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6863 if (phy == PHY_NONE)
6866 if (IS_ELKHARTLAKE(dev_priv))
6867 return phy <= PHY_C;
6869 if (INTEL_GEN(dev_priv) >= 11)
6870 return phy <= PHY_B;
/* Return true if @phy is a Type-C PHY: D-I on gen12+, C-F on gen11
 * (except EHL, which has no TC PHYs). */
6875 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6877 if (INTEL_GEN(dev_priv) >= 12)
6878 return phy >= PHY_D && phy <= PHY_I;
6880 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6881 return phy >= PHY_C && phy <= PHY_F;
/* Map a DDI port to its PHY. The mapping is identity except on EHL, where
 * port D is handled specially (the remapped value is elided in this
 * extract — verify against the full source). */
6886 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6888 if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6891 return (enum phy)port;
/* Map a DDI port to its Type-C port index: TC ports start at PORT_D on
 * gen12+ and at PORT_C on gen11; non-TC ports return PORT_TC_NONE. */
6894 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6896 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6897 return PORT_TC_NONE;
6899 if (INTEL_GEN(dev_priv) >= 12)
6900 return port - PORT_D;
6902 return port - PORT_C;
6905 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6909 return POWER_DOMAIN_PORT_DDI_A_LANES;
6911 return POWER_DOMAIN_PORT_DDI_B_LANES;
6913 return POWER_DOMAIN_PORT_DDI_C_LANES;
6915 return POWER_DOMAIN_PORT_DDI_D_LANES;
6917 return POWER_DOMAIN_PORT_DDI_E_LANES;
6919 return POWER_DOMAIN_PORT_DDI_F_LANES;
6921 return POWER_DOMAIN_PORT_DDI_G_LANES;
6924 return POWER_DOMAIN_PORT_OTHER;
/*
 * Map a digital port's AUX channel to the power domain that must be held
 * while using it. Type-C ports in TBT-ALT mode use the dedicated
 * *_TBT AUX domains; everything else uses the plain AUX domains.
 * NOTE(review): switch case labels are missing from this extracted
 * listing; only the return statements are visible.
 */
6928 enum intel_display_power_domain
6929 intel_aux_power_domain(struct intel_digital_port *dig_port)
6931 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
6932 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
/* Thunderbolt-alt mode on a TC PHY needs the TBT AUX domains. */
6934 if (intel_phy_is_tc(dev_priv, phy) &&
6935 dig_port->tc_mode == TC_PORT_TBT_ALT) {
6936 switch (dig_port->aux_ch) {
6938 return POWER_DOMAIN_AUX_C_TBT;
6940 return POWER_DOMAIN_AUX_D_TBT;
6942 return POWER_DOMAIN_AUX_E_TBT;
6944 return POWER_DOMAIN_AUX_F_TBT;
6946 return POWER_DOMAIN_AUX_G_TBT;
/* Unknown AUX channel: warn and pick a safe-ish default. */
6948 MISSING_CASE(dig_port->aux_ch);
6949 return POWER_DOMAIN_AUX_C_TBT;
/* Non-TBT path: plain AUX power domains. */
6953 switch (dig_port->aux_ch) {
6955 return POWER_DOMAIN_AUX_A;
6957 return POWER_DOMAIN_AUX_B;
6959 return POWER_DOMAIN_AUX_C;
6961 return POWER_DOMAIN_AUX_D;
6963 return POWER_DOMAIN_AUX_E;
6965 return POWER_DOMAIN_AUX_F;
6967 return POWER_DOMAIN_AUX_G;
6969 MISSING_CASE(dig_port->aux_ch);
6970 return POWER_DOMAIN_AUX_A;
/*
 * Compute the bitmask of display power domains required by this crtc
 * state: pipe + transcoder, optional panel fitter, each attached
 * encoder's domain, audio (on DDI platforms), and the display core
 * when a shared DPLL is in use. An inactive crtc needs no domains.
 */
6974 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6976 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6977 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6978 struct drm_encoder *encoder;
6979 enum pipe pipe = crtc->pipe;
6981 enum transcoder transcoder = crtc_state->cpu_transcoder;
/* Inactive pipe: no power domains needed. */
6983 if (!crtc_state->hw.active)
6986 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6987 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
/* force_thru also routes through the PCH panel fitter power well. */
6988 if (crtc_state->pch_pfit.enabled ||
6989 crtc_state->pch_pfit.force_thru)
6990 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
/* Accumulate the domain of every encoder driven by this crtc. */
6992 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
6993 crtc_state->uapi.encoder_mask) {
6994 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6996 mask |= BIT_ULL(intel_encoder->power_domain);
6999 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7000 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7002 if (crtc_state->shared_dpll)
7003 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
/*
 * Grab references on all power domains the new crtc state needs that
 * the old one did not, record the new set on the crtc, and return the
 * set of domains that are no longer needed so the caller can drop them
 * after the modeset completes.
 */
7009 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7011 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7012 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7013 enum intel_display_power_domain domain;
7014 u64 domains, new_domains, old_domains;
7016 old_domains = crtc->enabled_power_domains;
7017 crtc->enabled_power_domains = new_domains =
7018 get_crtc_power_domains(crtc_state);
/* Domains gained by the new state: take a reference on each. */
7020 domains = new_domains & ~old_domains;
7022 for_each_power_domain(domain, domains)
7023 intel_display_power_get(dev_priv, domain);
/* Domains only the old state needed: caller releases these later. */
7025 return old_domains & ~new_domains;
/*
 * Drop one (unchecked) reference on each power domain in @domains;
 * counterpart to the grabs done in modeset_get_crtc_power_domains().
 */
7028 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
7031 enum intel_display_power_domain domain;
7033 for_each_power_domain(domain, domains)
7034 intel_display_power_put_unchecked(dev_priv, domain);
/*
 * Full VLV/CHV pipe enable sequence: timings, pipeconf, PLL bring-up,
 * pfit, color LUTs, watermarks, then pipe + vblank + encoders. The
 * statement order follows the hardware-mandated enable sequence and
 * must not be rearranged.
 */
7037 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
7038 struct intel_atomic_state *state)
7040 struct drm_crtc *crtc = pipe_config->uapi.crtc;
7041 struct drm_device *dev = crtc->dev;
7042 struct drm_i915_private *dev_priv = to_i915(dev);
7043 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7044 enum pipe pipe = intel_crtc->pipe;
/* Enabling an already-active crtc is a driver bug. */
7046 if (WARN_ON(intel_crtc->active))
7049 if (intel_crtc_has_dp_encoder(pipe_config))
7050 intel_dp_set_m_n(pipe_config, M1_N1);
7052 intel_set_pipe_timings(pipe_config);
7053 intel_set_pipe_src_size(pipe_config);
/* CHV pipe B: force legacy blending and a zero canvas color. */
7055 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
7056 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
7057 I915_WRITE(CHV_CANVAS(pipe), 0);
7060 i9xx_set_pipeconf(pipe_config);
7062 intel_crtc->active = true;
7064 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7066 intel_encoders_pre_pll_enable(state, intel_crtc);
/* CHV and VLV have distinct PLL programming sequences. */
7068 if (IS_CHERRYVIEW(dev_priv)) {
7069 chv_prepare_pll(intel_crtc, pipe_config);
7070 chv_enable_pll(intel_crtc, pipe_config);
7072 vlv_prepare_pll(intel_crtc, pipe_config);
7073 vlv_enable_pll(intel_crtc, pipe_config);
7076 intel_encoders_pre_enable(state, intel_crtc);
7078 i9xx_pfit_enable(pipe_config);
7080 intel_color_load_luts(pipe_config);
7081 intel_color_commit(pipe_config);
7082 /* update DSPCNTR to configure gamma for pipe bottom color */
7083 intel_disable_primary_plane(pipe_config);
7085 dev_priv->display.initial_watermarks(state, pipe_config);
7086 intel_enable_pipe(pipe_config);
7088 assert_vblank_disabled(crtc);
7089 intel_crtc_vblank_on(pipe_config);
7091 intel_encoders_enable(state, intel_crtc);
/*
 * Write the pre-computed FP0/FP1 DPLL divider values for this pipe
 * from the crtc state's dpll_hw_state.
 */
7094 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7096 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7097 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7099 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
7100 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
/*
 * Gen2-gen4 (non-VLV/CHV) pipe enable sequence: dividers, timings,
 * pipeconf, PLL, pfit, LUTs, watermarks, then pipe + vblank +
 * encoders. Order follows the hardware enable sequence.
 */
7103 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
7104 struct intel_atomic_state *state)
7106 struct drm_crtc *crtc = pipe_config->uapi.crtc;
7107 struct drm_device *dev = crtc->dev;
7108 struct drm_i915_private *dev_priv = to_i915(dev);
7109 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7110 enum pipe pipe = intel_crtc->pipe;
/* Enabling an already-active crtc is a driver bug. */
7112 if (WARN_ON(intel_crtc->active))
7115 i9xx_set_pll_dividers(pipe_config);
7117 if (intel_crtc_has_dp_encoder(pipe_config))
7118 intel_dp_set_m_n(pipe_config, M1_N1);
7120 intel_set_pipe_timings(pipe_config);
7121 intel_set_pipe_src_size(pipe_config);
7123 i9xx_set_pipeconf(pipe_config);
7125 intel_crtc->active = true;
/* Gen2 has no FIFO underrun reporting. */
7127 if (!IS_GEN(dev_priv, 2))
7128 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7130 intel_encoders_pre_enable(state, intel_crtc);
7132 i9xx_enable_pll(intel_crtc, pipe_config);
7134 i9xx_pfit_enable(pipe_config);
7136 intel_color_load_luts(pipe_config);
7137 intel_color_commit(pipe_config);
7138 /* update DSPCNTR to configure gamma for pipe bottom color */
7139 intel_disable_primary_plane(pipe_config);
/* Platforms without an initial_watermarks hook use the legacy path. */
7141 if (dev_priv->display.initial_watermarks != NULL)
7142 dev_priv->display.initial_watermarks(state,
7145 intel_update_watermarks(intel_crtc);
7146 intel_enable_pipe(pipe_config);
7148 assert_vblank_disabled(crtc);
7149 intel_crtc_vblank_on(pipe_config);
7151 intel_encoders_enable(state, intel_crtc);
/*
 * Turn off the GMCH panel fitter if the old state had it configured.
 * Must run while the pipe is disabled (asserted below).
 */
7154 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7156 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7157 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Pfit was never enabled for this state: nothing to disable. */
7159 if (!old_crtc_state->gmch_pfit.control)
7162 assert_pipe_disabled(dev_priv, crtc->pipe);
7164 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
7165 I915_READ(PFIT_CONTROL));
7166 I915_WRITE(PFIT_CONTROL, 0);
/*
 * Gen2-gen4/VLV/CHV pipe disable sequence: encoders off, vblank off,
 * pipe off, pfit off, then the platform-specific PLL teardown.
 * Statement order mirrors the hardware disable sequence.
 */
7169 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
7170 struct intel_atomic_state *state)
7172 struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
7173 struct drm_device *dev = crtc->dev;
7174 struct drm_i915_private *dev_priv = to_i915(dev);
7175 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7176 enum pipe pipe = intel_crtc->pipe;
7179 * On gen2 planes are double buffered but the pipe isn't, so we must
7180 * wait for planes to fully turn off before disabling the pipe.
7182 if (IS_GEN(dev_priv, 2))
7183 intel_wait_for_vblank(dev_priv, pipe)
7185 intel_encoders_disable(state, intel_crtc);
7187 drm_crtc_vblank_off(crtc);
7188 assert_vblank_disabled(crtc);
7190 intel_disable_pipe(old_crtc_state);
7192 i9xx_pfit_disable(old_crtc_state);
7194 intel_encoders_post_disable(state, intel_crtc);
/* DSI keeps its PLL; otherwise tear down the platform PLL. */
7196 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
7197 if (IS_CHERRYVIEW(dev_priv))
7198 chv_disable_pll(dev_priv, pipe);
7199 else if (IS_VALLEYVIEW(dev_priv))
7200 vlv_disable_pll(dev_priv, pipe);
7202 i9xx_disable_pll(old_crtc_state);
7205 intel_encoders_post_pll_disable(state, intel_crtc);
7207 if (!IS_GEN(dev_priv, 2))
7208 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/* Legacy watermark path for platforms without the hook. */
7210 if (!dev_priv->display.initial_watermarks)
7211 intel_update_watermarks(intel_crtc);
7213 /* clock the pipe down to 640x480@60 to potentially save power */
7214 if (IS_I830(dev_priv))
7215 i830_enable_pipe(dev_priv, pipe);
/*
 * Forcibly disable a crtc outside a normal atomic commit (used during
 * HW state sanitization at load/resume): disable its planes, call the
 * platform crtc_disable hook via a throwaway atomic state, then scrub
 * all software bookkeeping (crtc/connector/encoder masks, watermarks,
 * power domains, cdclk/voltage/bandwidth accounting).
 */
7218 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
7219 struct drm_modeset_acquire_ctx *ctx)
7221 struct intel_encoder *encoder;
7222 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7223 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
7224 struct intel_bw_state *bw_state =
7225 to_intel_bw_state(dev_priv->bw_obj.state);
7226 struct intel_crtc_state *crtc_state =
7227 to_intel_crtc_state(crtc->state);
7228 enum intel_display_power_domain domain;
7229 struct intel_plane *plane;
7231 struct drm_atomic_state *state;
7232 struct intel_crtc_state *temp_crtc_state;
/* Already off: nothing to do. */
7235 if (!intel_crtc->active)
/* First take down every visible plane on this crtc. */
7238 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
7239 const struct intel_plane_state *plane_state =
7240 to_intel_plane_state(plane->base.state);
7242 if (plane_state->uapi.visible)
7243 intel_plane_disable_noatomic(intel_crtc, plane);
/* Build a temporary atomic state just to drive crtc_disable(). */
7246 state = drm_atomic_state_alloc(crtc->dev);
7248 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
7249 crtc->base.id, crtc->name);
7253 state->acquire_ctx = ctx;
7255 /* Everything's already locked, -EDEADLK can't happen. */
7256 temp_crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
7257 ret = drm_atomic_add_affected_connectors(state, crtc);
7259 WARN_ON(IS_ERR(temp_crtc_state) || ret);
7261 dev_priv->display.crtc_disable(temp_crtc_state, to_intel_atomic_state(state));
7263 drm_atomic_state_put(state);
7265 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
7266 crtc->base.id, crtc->name);
/* Scrub the software state to match the now-disabled hardware. */
7268 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
7269 crtc->state->active = false;
7270 intel_crtc->active = false;
7271 crtc->enabled = false;
7272 crtc->state->connector_mask = 0;
7273 crtc->state->encoder_mask = 0;
7274 intel_crtc_free_hw_state(crtc_state);
7275 memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
7277 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
7278 encoder->base.crtc = NULL;
7280 intel_fbc_disable(intel_crtc);
7281 intel_update_watermarks(intel_crtc);
7282 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
/* Release every power domain this crtc was holding. */
7284 domains = intel_crtc->enabled_power_domains;
7285 for_each_power_domain(domain, domains)
7286 intel_display_power_put_unchecked(dev_priv, domain);
7287 intel_crtc->enabled_power_domains = 0;
7289 dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
7290 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
7291 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
7293 bw_state->data_rate[intel_crtc->pipe] = 0;
7294 bw_state->num_active_planes[intel_crtc->pipe] = 0;
7298 * turn all crtc's off, but do not adjust state
7299 * This has to be paired with a call to intel_modeset_setup_hw_state.
7301 int intel_display_suspend(struct drm_device *dev)
7303 struct drm_i915_private *dev_priv = to_i915(dev);
7304 struct drm_atomic_state *state;
/* Helper returns the duplicated state (or ERR_PTR) for later restore. */
7307 state = drm_atomic_helper_suspend(dev);
7308 ret = PTR_ERR_OR_ZERO(state);
7310 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
/* Stash the pre-suspend state; resume path re-commits it. */
7312 dev_priv->modeset_restore_state = state;
/*
 * intel_encoder_destroy - default .destroy hook for intel encoders.
 * Tears down the DRM base encoder, then frees the containing
 * intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
7324 /* Cross check the actual hw state with our own modeset state tracking (and it's
7325 * internal consistency). */
7326 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
7327 struct drm_connector_state *conn_state)
7329 struct intel_connector *connector = to_intel_connector(conn_state->connector);
7331 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
7332 connector->base.base.id,
7333 connector->base.name);
/* Hardware says the connector is on: verify our tracking agrees. */
7335 if (connector->get_hw_state(connector)) {
7336 struct intel_encoder *encoder = connector->encoder;
7338 I915_STATE_WARN(!crtc_state,
7339 "connector enabled without attached crtc\n");
7344 I915_STATE_WARN(!crtc_state->hw.active,
7345 "connector is active, but attached crtc isn't\n");
/* MST encoders are shared; per-connector checks don't apply. */
7347 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7350 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7351 "atomic encoder doesn't match attached encoder\n");
7353 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7354 "attached encoder crtc differs from connector crtc\n");
/* Hardware says off: then neither crtc nor encoder may be bound. */
7356 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
7357 "attached crtc is active, but connector isn't\n");
7358 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7359 "best encoder set without crtc!\n");
7363 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7365 if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7366 return crtc_state->fdi_lanes;
/*
 * Validate the FDI lane count requested for @pipe against platform
 * limits and against lanes consumed by the other pipes (FDI bandwidth
 * is shared on 3-pipe IVB). Returns 0 on success, -EINVAL on an
 * impossible config, or a PTR_ERR from acquiring another crtc's state.
 * NOTE(review): the per-pipe switch labels are missing from this
 * extracted listing.
 */
7371 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
7372 struct intel_crtc_state *pipe_config)
7374 struct drm_i915_private *dev_priv = to_i915(dev);
7375 struct drm_atomic_state *state = pipe_config->uapi.state;
7376 struct intel_crtc *other_crtc;
7377 struct intel_crtc_state *other_crtc_state;
7379 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
7380 pipe_name(pipe), pipe_config->fdi_lanes);
/* Absolute hardware maximum is 4 lanes. */
7381 if (pipe_config->fdi_lanes > 4) {
7382 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
7383 pipe_name(pipe), pipe_config->fdi_lanes);
/* HSW/BDW FDI (for the LPT PCH) tops out at 2 lanes. */
7387 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7388 if (pipe_config->fdi_lanes > 2) {
7389 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
7390 pipe_config->fdi_lanes);
/* Two-pipe platforms have no cross-pipe sharing constraints. */
7397 if (INTEL_NUM_PIPES(dev_priv) == 2)
7400 /* Ivybridge 3 pipe is really complicated */
7405 if (pipe_config->fdi_lanes <= 2)
/* >2 lanes on pipe B only works when pipe C uses no FDI lanes. */
7408 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
7410 intel_atomic_get_crtc_state(state, other_crtc);
7411 if (IS_ERR(other_crtc_state))
7412 return PTR_ERR(other_crtc_state);
7414 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
7415 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
7416 pipe_name(pipe), pipe_config->fdi_lanes);
/* Pipe C itself can never use more than 2 lanes. */
7421 if (pipe_config->fdi_lanes > 2) {
7422 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
7423 pipe_name(pipe), pipe_config->fdi_lanes);
/* And pipe C only works if pipe B leaves it enough lanes. */
7427 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
7429 intel_atomic_get_crtc_state(state, other_crtc);
7430 if (IS_ERR(other_crtc_state))
7431 return PTR_ERR(other_crtc_state);
7433 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
7434 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
/*
 * Compute the FDI link configuration (lane count and M/N values) for
 * a PCH-encoder pipe, then validate it. On -EINVAL, retries with a
 * reduced pipe bpp (down to 18bpp) by asking the caller to recompute.
 */
7444 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
7445 struct intel_crtc_state *pipe_config)
7447 struct drm_device *dev = intel_crtc->base.dev;
7448 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
7449 int lane, link_bw, fdi_dotclock, ret;
7450 bool needs_recompute = false;
7453 /* FDI is a binary signal running at ~2.7GHz, encoding
7454 * each output octet as 10 bits. The actual frequency
7455 * is stored as a divider into a 100MHz clock, and the
7456 * mode pixel clock is stored in units of 1KHz.
7457 * Hence the bw of each lane in terms of the mode signal
7460 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
7462 fdi_dotclock = adjusted_mode->crtc_clock;
7464 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
7465 pipe_config->pipe_bpp);
7467 pipe_config->fdi_lanes = lane;
7469 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
7470 link_bw, &pipe_config->fdi_m_n, false, false);
7472 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
/* -EDEADLK from acquiring another crtc's state must propagate as-is. */
7473 if (ret == -EDEADLK)
/* Config impossible: drop 2 bits/component and ask for a recompute. */
7476 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
7477 pipe_config->pipe_bpp -= 2*3;
7478 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
7479 pipe_config->pipe_bpp);
7480 needs_recompute = true;
7481 pipe_config->bw_constrained = true;
7486 if (needs_recompute)
/*
 * Can this crtc state use IPS (Intermediate Pixel Storage)?
 * Requires an IPS-capable crtc (ULT, pipe A), the module parameter
 * enabled, bpp <= 24, and on BDW a pixel rate with enough cdclk
 * headroom (<= 95% of max cdclk).
 */
7492 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7494 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7495 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7497 /* IPS only exists on ULT machines and is tied to pipe A. */
7498 if (!hsw_crtc_supports_ips(crtc))
7501 if (!i915_modparams.enable_ips)
7504 if (crtc_state->pipe_bpp > 24)
7508 * We compare against max which means we must take
7509 * the increased cdclk requirement into account when
7510 * calculating the new cdclk.
7512 * Should measure whether using a lower cdclk w/o IPS
7514 if (IS_BROADWELL(dev_priv) &&
7515 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
/*
 * Decide whether IPS should actually be enabled for this state:
 * must be IPS-capable, not doing CRC capture, have at least one
 * non-cursor plane, and (on BDW) fit within the logical cdclk.
 */
7521 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7523 struct drm_i915_private *dev_priv =
7524 to_i915(crtc_state->uapi.crtc->dev);
7525 struct intel_atomic_state *intel_state =
7526 to_intel_atomic_state(crtc_state->uapi.state);
7528 if (!hsw_crtc_state_ips_capable(crtc_state))
7532 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7533 * enabled and disabled dynamically based on package C states,
7534 * user space can't make reliable use of the CRCs, so let's just
7535 * completely disable it.
7537 if (crtc_state->crc_enabled)
7540 /* IPS should be fine as long as at least one plane is enabled. */
7541 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7544 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7545 if (IS_BROADWELL(dev_priv) &&
7546 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7552 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7554 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7556 /* GDG double wide on either pipe, otherwise pipe A only */
7557 return INTEL_GEN(dev_priv) < 4 &&
7558 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
/*
 * Compute the effective pipe pixel rate on ILK+: the mode clock,
 * scaled up by the panel-fitter downscale ratio when the PCH pfit
 * is downscaling (pfit size packed as width<<16 | height).
 */
7561 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7565 pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
7568 * We only use IF-ID interlacing. If we ever use
7569 * PF-ID we'll need to adjust the pixel_rate here.
7572 if (pipe_config->pch_pfit.enabled) {
7573 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7574 u32 pfit_size = pipe_config->pch_pfit.size;
7576 pipe_w = pipe_config->pipe_src_w;
7577 pipe_h = pipe_config->pipe_src_h;
/* pfit.size: width in the high 16 bits, height in the low 16. */
7579 pfit_w = (pfit_size >> 16) & 0xFFFF;
7580 pfit_h = pfit_size & 0xFFFF;
/* Clamp the source to the pfit window (no upscale contribution). */
7581 if (pipe_w < pfit_w)
7583 if (pipe_h < pfit_h)
/* A zero pfit dimension would divide by zero below. */
7586 if (WARN_ON(!pfit_w || !pfit_h))
/* Scale rate by (src area / pfit area) using 64-bit math. */
7589 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
/*
 * Fill crtc_state->pixel_rate: GMCH platforms use the raw mode clock
 * (pfit scaling not accounted for yet), ILK+ use the pfit-adjusted
 * rate from ilk_pipe_pixel_rate().
 */
7596 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7598 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7600 if (HAS_GMCH(dev_priv))
7601 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7602 crtc_state->pixel_rate =
7603 crtc_state->hw.adjusted_mode.crtc_clock;
7605 crtc_state->pixel_rate =
7606 ilk_pipe_pixel_rate(crtc_state);
/*
 * Core per-crtc config validation: clock limits (with pre-gen4 double
 * wide handling), YCbCr-vs-CTM exclusivity, even-width requirements,
 * the zero-hsync-front-porch workaround, pixel rate computation, and
 * FDI config for PCH encoders. Returns 0 or a negative error.
 */
7609 static int intel_crtc_compute_config(struct intel_crtc *crtc,
7610 struct intel_crtc_state *pipe_config)
7612 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7613 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
7614 int clock_limit = dev_priv->max_dotclk_freq;
/* Pre-gen4: normally limited to 90% of cdclk unless double wide. */
7616 if (INTEL_GEN(dev_priv) < 4) {
7617 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
7620 * Enable double wide mode when the dot clock
7621 * is > 90% of the (display) core speed.
7623 if (intel_crtc_supports_double_wide(crtc) &&
7624 adjusted_mode->crtc_clock > clock_limit) {
7625 clock_limit = dev_priv->max_dotclk_freq;
7626 pipe_config->double_wide = true;
7630 if (adjusted_mode->crtc_clock > clock_limit) {
7631 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
7632 adjusted_mode->crtc_clock, clock_limit,
7633 yesno(pipe_config->double_wide));
/* YCbCr output consumes the single pipe CSC, so CTM can't coexist. */
7637 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7638 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
7639 pipe_config->hw.ctm) {
7641 * There is only one pipe CSC unit per pipe, and we need that
7642 * for output conversion from RGB->YCBCR. So if CTM is already
7643 * applied we can't support YCBCR420 output.
7645 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
7650 * Pipe horizontal size must be even in:
7652 * - LVDS dual channel mode
7653 * - Double wide pipe
7655 if (pipe_config->pipe_src_w & 1) {
7656 if (pipe_config->double_wide) {
7657 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
7661 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
7662 intel_is_dual_link_lvds(dev_priv)) {
7663 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7668 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
7669 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7671 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7672 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7675 intel_crtc_compute_pixel_rate(pipe_config);
7677 if (pipe_config->has_pch_encoder)
7678 return ironlake_fdi_compute_config(crtc, pipe_config);
7684 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7686 while (*num > DATA_LINK_M_N_MASK ||
7687 *den > DATA_LINK_M_N_MASK) {
/*
 * Compute a link M/N pair for ratio m/n. N is either forced to the
 * dongle-friendly constant 0x8000 or rounded up to a power of two
 * (capped at DATA_LINK_N_MAX); M is derived to preserve the ratio,
 * then both are reduced to fit the register fields.
 * NOTE(review): the trailing parameter (presumably the constant-N
 * flag) is missing from this extracted listing.
 */
7693 static void compute_m_n(unsigned int m, unsigned int n,
7694 u32 *ret_m, u32 *ret_n,
7698 * Several DP dongles in particular seem to be fussy about
7699 * too large link M/N values. Give N value as 0x8000 that
7700 * should be acceptable by specific devices. 0x8000 is the
7701 * specified fixed N value for asynchronous clock mode,
7702 * which the devices expect also in synchronous clock mode.
7707 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
/* M = m * N / n, computed in 64 bits to avoid overflow. */
7709 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7710 intel_reduce_m_n_ratio(ret_m, ret_n);
/*
 * Fill @m_n with the data (gmch) and link M/N values for the given
 * bpp/lane/clock combination. With FEC enabled the data clock is
 * scaled up by the FEC overhead first.
 */
7714 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7715 int pixel_clock, int link_clock,
7716 struct intel_link_m_n *m_n,
7717 bool constant_n, bool fec_enable)
7719 u32 data_clock = bits_per_pixel * pixel_clock;
/* Account for FEC bandwidth overhead when enabled. */
7722 data_clock = intel_dp_mode_to_fec_clock(data_clock);
/* Data M/N: payload bandwidth vs total link bandwidth (8b symbols). */
7725 compute_m_n(data_clock,
7726 link_clock * nlanes * 8,
7727 &m_n->gmch_m, &m_n->gmch_n,
/* Link M/N: pixel clock vs link symbol clock. */
7730 compute_m_n(pixel_clock, link_clock,
7731 &m_n->link_m, &m_n->link_n,
/*
 * On IBX/CPT PCH, make the VBT's LVDS SSC setting agree with what the
 * BIOS actually programmed in PCH_DREF_CONTROL, to avoid flicker from
 * toggling SSC on a panel the BIOS already lit up.
 */
7735 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
7738 * There may be no VBT; and if the BIOS enabled SSC we can
7739 * just keep using it to avoid unnecessary flicker. Whereas if the
7740 * BIOS isn't using it, don't assume it will work even if the VBT
7741 * indicates as much.
7743 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
7744 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
7747 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
7748 DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
7749 enableddisabled(bios_lvds_use_ssc),
7750 enableddisabled(dev_priv->vbt.lvds_use_ssc));
/* BIOS wins: adopt its SSC choice in our VBT copy. */
7751 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
7756 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7758 if (i915_modparams.panel_use_ssc >= 0)
7759 return i915_modparams.panel_use_ssc != 0;
7760 return dev_priv->vbt.lvds_use_ssc
7761 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7764 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7766 return (1 << dpll->n) << 16 | dpll->m2;
7769 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7771 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
/*
 * Compute the FP0/FP1 divider values for the crtc state, using the
 * Pineview or i9xx packing as appropriate. FP1 gets the reduced
 * (downclocked) dividers for LVDS panels that support it, otherwise
 * it mirrors FP0.
 */
7774 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7775 struct intel_crtc_state *crtc_state,
7776 struct dpll *reduced_clock)
7778 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* PNV packs the FP register differently from the rest of i9xx. */
7781 if (IS_PINEVIEW(dev_priv)) {
7782 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7784 fp2 = pnv_dpll_compute_fp(reduced_clock);
7786 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7788 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7791 crtc_state->dpll_hw_state.fp0 = fp;
/* FP1 carries the reduced dividers only for downclock-capable LVDS. */
7793 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7795 crtc_state->dpll_hw_state.fp1 = fp2;
7797 crtc_state->dpll_hw_state.fp1 = fp;
/*
 * DPIO workaround for VLV PLL B: recalibrate the opamp by forcing it
 * on with a sane value instead of letting it calibrate to the max
 * (0x3f). The exact read-modify-write sequence follows the DPIO
 * programming notes and must stay in this order.
 */
7801 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7807 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7808 * and set it to a reasonable value instead.
7810 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7811 reg_val &= 0xffffff00;
7812 reg_val |= 0x00000030;
7813 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7815 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7816 reg_val &= 0x00ffffff;
7817 reg_val |= 0x8c000000;
7818 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
/* Second pass: clear the forced value again. */
7820 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7821 reg_val &= 0xffffff00;
7822 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7824 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7825 reg_val &= 0x00ffffff;
7826 reg_val |= 0xb0000000;
7827 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
/*
 * Program the PCH transcoder data/link M1/N1 registers for this pipe
 * from the supplied M/N values (TU size packed into DATA_M1).
 */
7830 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7831 const struct intel_link_m_n *m_n)
7833 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7834 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7835 enum pipe pipe = crtc->pipe;
7837 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7838 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7839 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7840 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7843 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7844 enum transcoder transcoder)
7846 if (IS_HASWELL(dev_priv))
7847 return transcoder == TRANSCODER_EDP;
7850 * Strictly speaking some registers are available before
7851 * gen7, but we only support DRRS on gen7+
7853 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
/*
 * Program the CPU transcoder data/link M/N registers. Gen5+ use the
 * per-transcoder registers (plus the optional M2/N2 set for DRRS);
 * older G4X-style hardware uses per-pipe registers.
 */
7856 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7857 const struct intel_link_m_n *m_n,
7858 const struct intel_link_m_n *m2_n2)
7860 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7861 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7862 enum pipe pipe = crtc->pipe;
7863 enum transcoder transcoder = crtc_state->cpu_transcoder;
7865 if (INTEL_GEN(dev_priv) >= 5) {
7866 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7867 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7868 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7869 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7871 * M2_N2 registers are set only if DRRS is supported
7872 * (to make sure the registers are not unnecessarily accessed).
7874 if (m2_n2 && crtc_state->has_drrs &&
7875 transcoder_has_m2_n2(dev_priv, transcoder)) {
7876 I915_WRITE(PIPE_DATA_M2(transcoder),
7877 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7878 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7879 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7880 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
/* Pre-gen5: per-pipe G4X-style M/N registers. */
7883 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7884 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7885 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7886 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
/*
 * Program the DP M/N values selected by @m_n: M1_N1 programs both
 * sets (M2/N2 where supported); M2_N2 programs the downclocked
 * values into M1_N1 on hardware lacking the second register set.
 * PCH encoders always use the PCH transcoder path with dp_m_n.
 */
7890 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7892 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7895 dp_m_n = &crtc_state->dp_m_n;
7896 dp_m2_n2 = &crtc_state->dp_m2_n2;
7897 } else if (m_n == M2_N2) {
7900 * M2_N2 registers are not supported. Hence m2_n2 divider value
7901 * needs to be programmed into M1_N1.
7903 dp_m_n = &crtc_state->dp_m2_n2;
7905 DRM_ERROR("Unsupported divider value\n");
7909 if (crtc_state->has_pch_encoder)
7910 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7912 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
/*
 * Compute the VLV DPLL control and MD register values into
 * dpll_hw_state. DSI bypasses the DPLL so VCO/buffer enables are
 * skipped, but the reference clock bits are still needed.
 */
7915 static void vlv_compute_dpll(struct intel_crtc *crtc,
7916 struct intel_crtc_state *pipe_config)
7918 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7919 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
/* Pipes other than A additionally need the CRI clock. */
7920 if (crtc->pipe != PIPE_A)
7921 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7923 /* DPLL not used with DSI, but still need the rest set up */
7924 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7925 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7926 DPLL_EXT_BUFFER_ENABLE_VLV;
7928 pipe_config->dpll_hw_state.dpll_md =
7929 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
/*
 * Compute the CHV DPLL control and MD register values into
 * dpll_hw_state. Mirrors vlv_compute_dpll() but uses the CHV SSC
 * reference clock and has no external buffer enable bit.
 */
7932 static void chv_compute_dpll(struct intel_crtc *crtc,
7933 struct intel_crtc_state *pipe_config)
7935 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7936 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7937 if (crtc->pipe != PIPE_A)
7938 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7940 /* DPLL not used with DSI, but still need the rest set up */
7941 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7942 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7944 pipe_config->dpll_hw_state.dpll_md =
7945 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
/*
 * Program the VLV DPIO PLL for this pipe: dividers, LPF coefficients,
 * and clock source selection, per the eDP/HDMI DPIO programming
 * notes. The DPIO write sequence is order-sensitive and must not be
 * rearranged. Skipped entirely for DSI (VCO disabled in dpll value).
 */
7948 static void vlv_prepare_pll(struct intel_crtc *crtc,
7949 const struct intel_crtc_state *pipe_config)
7951 struct drm_device *dev = crtc->base.dev;
7952 struct drm_i915_private *dev_priv = to_i915(dev);
7953 enum pipe pipe = crtc->pipe;
7955 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7956 u32 coreclk, reg_val;
/* Write the control value with VCO/buffer enables masked off first. */
7959 I915_WRITE(DPLL(pipe),
7960 pipe_config->dpll_hw_state.dpll &
7961 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7963 /* No need to actually set up the DPLL with DSI */
7964 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7967 vlv_dpio_get(dev_priv);
7969 bestn = pipe_config->dpll.n;
7970 bestm1 = pipe_config->dpll.m1;
7971 bestm2 = pipe_config->dpll.m2;
7972 bestp1 = pipe_config->dpll.p1;
7973 bestp2 = pipe_config->dpll.p2;
7975 /* See eDP HDMI DPIO driver vbios notes doc */
7977 /* PLL B needs special handling */
7979 vlv_pllb_recal_opamp(dev_priv, pipe);
7981 /* Set up Tx target for periodic Rcomp update */
7982 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7984 /* Disable target IRef on PLL */
7985 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7986 reg_val &= 0x00ffffff;
7987 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7989 /* Disable fast lock */
7990 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7992 /* Set idtafcrecal before PLL is enabled */
7993 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7994 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7995 mdiv |= ((bestn << DPIO_N_SHIFT));
7996 mdiv |= (1 << DPIO_K_SHIFT);
7999 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
8000 * but we don't support that).
8001 * Note: don't use the DAC post divider as it seems unstable.
8003 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
8004 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
/* Calibration enable is written in a second pass, same dividers. */
8006 mdiv |= DPIO_ENABLE_CALIBRATION;
8007 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
8009 /* Set HBR and RBR LPF coefficients */
8010 if (pipe_config->port_clock == 162000 ||
8011 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
8012 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
8013 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
8016 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
8019 if (intel_crtc_has_dp_encoder(pipe_config)) {
8020 /* Use SSC source */
8022 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8025 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8027 } else { /* HDMI or VGA */
8028 /* Use bend source */
8030 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8033 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
/* Core clock: extra enable bit for DP encoders. */
8037 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
8038 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
8039 if (intel_crtc_has_dp_encoder(pipe_config))
8040 coreclk |= 0x01000000;
8041 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
8043 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
8045 vlv_dpio_put(dev_priv);
/*
 * Program the Cherryview DPLL through sideband (DPIO) writes: the
 * precomputed dividers from pipe_config->dpll (n, m1, m2 + fraction,
 * p1, p2) plus loop filter / lock-detect parameters chosen from the
 * target VCO frequency. Skipped entirely when the DPLL is not going
 * to be enabled (the DSI case).
 */
8048 static void chv_prepare_pll(struct intel_crtc *crtc,
8049 const struct intel_crtc_state *pipe_config)
8051 struct drm_device *dev = crtc->base.dev;
8052 struct drm_i915_private *dev_priv = to_i915(dev);
8053 enum pipe pipe = crtc->pipe;
8054 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8055 u32 loopfilter, tribuf_calcntr;
8056 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
8060 /* Enable Refclk and SSC */
8061 I915_WRITE(DPLL(pipe),
8062 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
8064 /* No need to actually set up the DPLL with DSI */
8065 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
/* m2 carries a 22-bit fractional part in its low bits; split it out */
8068 bestn = pipe_config->dpll.n;
8069 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
8070 bestm1 = pipe_config->dpll.m1;
8071 bestm2 = pipe_config->dpll.m2 >> 22;
8072 bestp1 = pipe_config->dpll.p1;
8073 bestp2 = pipe_config->dpll.p2;
8074 vco = pipe_config->dpll.vco;
8078 vlv_dpio_get(dev_priv);
8080 /* p1 and p2 divider */
8081 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
8082 5 << DPIO_CHV_S1_DIV_SHIFT |
8083 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
8084 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
8085 1 << DPIO_CHV_K_DIV_SHIFT);
8087 /* Feedback post-divider - m2 */
8088 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
8090 /* Feedback refclk divider - n and m1 */
8091 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
8092 DPIO_CHV_M1_DIV_BY_2 |
8093 1 << DPIO_CHV_N_DIV_SHIFT);
8095 /* M2 fraction division */
8096 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
8098 /* M2 fraction division enable */
8099 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8100 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
8101 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
8103 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
8104 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
8106 /* Program digital lock detect threshold */
8107 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
8108 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
8109 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
8110 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
8112 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
8113 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
/* Loop filter coefficients and tri-buffer count depend on the VCO rate */
8116 if (vco == 5400000) {
8117 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
8118 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
8119 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
8120 tribuf_calcntr = 0x9;
8121 } else if (vco <= 6200000) {
8122 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
8123 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
8124 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8125 tribuf_calcntr = 0x9;
8126 } else if (vco <= 6480000) {
8127 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8128 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8129 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8130 tribuf_calcntr = 0x8;
8132 /* Not supported. Apply the same limits as in the max case */
8133 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8134 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8135 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8138 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
8140 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
8141 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
8142 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
8143 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
8146 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
8147 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
8150 vlv_dpio_put(dev_priv);
8154 * vlv_force_pll_on - forcibly enable just the PLL
8155 * @dev_priv: i915 private structure
8156 * @pipe: pipe PLL to enable
8157 * @dpll: PLL configuration
8159 * Enable the PLL for @pipe using the supplied @dpll config. To be used
8160 * in cases where we need the PLL enabled even when @pipe is not going to
/*
 * Build a minimal throwaway crtc state around the caller-supplied @dpll
 * values and run the platform-specific compute/prepare/enable sequence
 * (CHV or VLV variant). The state is heap-allocated with kzalloc;
 * presumably it is freed and an error returned on elided lines — TODO
 * confirm against the full source.
 */
8163 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8164 const struct dpll *dpll)
8166 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8167 struct intel_crtc_state *pipe_config;
8169 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8173 pipe_config->uapi.crtc = &crtc->base;
8174 pipe_config->pixel_multiplier = 1;
8175 pipe_config->dpll = *dpll;
8177 if (IS_CHERRYVIEW(dev_priv)) {
8178 chv_compute_dpll(crtc, pipe_config);
8179 chv_prepare_pll(crtc, pipe_config);
8180 chv_enable_pll(crtc, pipe_config);
8182 vlv_compute_dpll(crtc, pipe_config);
8183 vlv_prepare_pll(crtc, pipe_config);
8184 vlv_enable_pll(crtc, pipe_config);
8193 * vlv_force_pll_off - forcibly disable just the PLL
8194 * @dev_priv: i915 private structure
8195 * @pipe: pipe PLL to disable
8197 * Disable the PLL for @pipe. To be used in cases where the PLL was
8198 * previously forced on (e.g. via vlv_force_pll_on()) for a pipe that
/* Dispatch to the CHV or VLV PLL disable path for @pipe. */
8200 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8202 if (IS_CHERRYVIEW(dev_priv))
8203 chv_disable_pll(dev_priv, pipe);
8205 vlv_disable_pll(dev_priv, pipe);
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for
 * gen3/gen4-class hardware from the dividers already stored in
 * crtc_state->dpll, and stash them in crtc_state->dpll_hw_state.
 * @reduced_clock optionally supplies downclocked p1 (used on G4X).
 * Nothing is written to hardware here.
 */
8208 static void i9xx_compute_dpll(struct intel_crtc *crtc,
8209 struct intel_crtc_state *crtc_state,
8210 struct dpll *reduced_clock)
8212 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8214 struct dpll *clock = &crtc_state->dpll;
8216 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8218 dpll = DPLL_VGA_MODE_DIS;
8220 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8221 dpll |= DPLLB_MODE_LVDS;
8223 dpll |= DPLLB_MODE_DAC_SERIAL;
/* Only these platforms encode the pixel multiplier in DPLL itself */
8225 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8226 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8227 dpll |= (crtc_state->pixel_multiplier - 1)
8228 << SDVO_MULTIPLIER_SHIFT_HIRES;
8231 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8232 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8233 dpll |= DPLL_SDVO_HIGH_SPEED;
8235 if (intel_crtc_has_dp_encoder(crtc_state))
8236 dpll |= DPLL_SDVO_HIGH_SPEED;
8238 /* compute bitmask from p1 value */
8239 if (IS_PINEVIEW(dev_priv))
8240 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
8242 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8243 if (IS_G4X(dev_priv) && reduced_clock)
8244 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
/* p2 post divider: encoded per discrete supported value */
8246 switch (clock->p2) {
8248 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8251 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8254 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8257 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8260 if (INTEL_GEN(dev_priv) >= 4)
8261 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
/* Reference clock selection: TV clock, SSC for LVDS panels, or DREFCLK */
8263 if (crtc_state->sdvo_tv_clock)
8264 dpll |= PLL_REF_INPUT_TVCLKINBC;
8265 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8266 intel_panel_use_ssc(dev_priv))
8267 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8269 dpll |= PLL_REF_INPUT_DREFCLK;
8271 dpll |= DPLL_VCO_ENABLE;
8272 crtc_state->dpll_hw_state.dpll = dpll;
8274 if (INTEL_GEN(dev_priv) >= 4) {
/* gen4+: pixel multiplier lives in the DPLL_MD register instead */
8275 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
8276 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8277 crtc_state->dpll_hw_state.dpll_md = dpll_md;
/*
 * Compute the DPLL register value for gen2 hardware from
 * crtc_state->dpll and store it in crtc_state->dpll_hw_state.dpll.
 * Only software state is updated; no register writes happen here.
 */
8281 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8282 struct intel_crtc_state *crtc_state,
8283 struct dpll *reduced_clock)
8285 struct drm_device *dev = crtc->base.dev;
8286 struct drm_i915_private *dev_priv = to_i915(dev);
8288 struct dpll *clock = &crtc_state->dpll;
8290 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8292 dpll = DPLL_VGA_MODE_DIS;
/* p1/p2 encoding differs between the LVDS and non-LVDS paths */
8294 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8295 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8298 dpll |= PLL_P1_DIVIDE_BY_TWO;
8300 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8302 dpll |= PLL_P2_DIVIDE_BY_4;
8307 * "[Almador Errata}: For the correct operation of the muxed DVO pins
8308 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
8309 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
8310 * Enable) must be set to “1” in both the DPLL A Control Register
8311 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
8313 * For simplicity We simply keep both bits always enabled in
8314 * both DPLLS. The spec says we should disable the DVO 2X clock
8315 * when not needed, but this seems to work fine in practice.
8317 if (IS_I830(dev_priv) ||
8318 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
8319 dpll |= DPLL_DVO_2X_MODE;
8321 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8322 intel_panel_use_ssc(dev_priv))
8323 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8325 dpll |= PLL_REF_INPUT_DREFCLK;
8327 dpll |= DPLL_VCO_ENABLE;
8328 crtc_state->dpll_hw_state.dpll = dpll;
/*
 * Program the transcoder timing registers (HTOTAL/HBLANK/HSYNC and the
 * vertical counterparts, plus VSYNCSHIFT) from the adjusted mode.
 * All register fields are the usual "value - 1" hardware encoding.
 */
8331 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
8333 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8334 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8335 enum pipe pipe = crtc->pipe;
8336 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8337 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
8338 u32 crtc_vtotal, crtc_vblank_end;
8341 /* We need to be careful not to changed the adjusted mode, for otherwise
8342 * the hw state checker will get angry at the mismatch. */
8343 crtc_vtotal = adjusted_mode->crtc_vtotal;
8344 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
8346 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
8347 /* the chip adds 2 halflines automatically */
8349 crtc_vblank_end -= 1;
8351 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8352 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
8354 vsyncshift = adjusted_mode->crtc_hsync_start -
8355 adjusted_mode->crtc_htotal / 2;
8357 vsyncshift += adjusted_mode->crtc_htotal;
8360 if (INTEL_GEN(dev_priv) > 3)
8361 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
8363 I915_WRITE(HTOTAL(cpu_transcoder),
8364 (adjusted_mode->crtc_hdisplay - 1) |
8365 ((adjusted_mode->crtc_htotal - 1) << 16));
8366 I915_WRITE(HBLANK(cpu_transcoder),
8367 (adjusted_mode->crtc_hblank_start - 1) |
8368 ((adjusted_mode->crtc_hblank_end - 1) << 16));
8369 I915_WRITE(HSYNC(cpu_transcoder),
8370 (adjusted_mode->crtc_hsync_start - 1) |
8371 ((adjusted_mode->crtc_hsync_end - 1) << 16));
8373 I915_WRITE(VTOTAL(cpu_transcoder),
8374 (adjusted_mode->crtc_vdisplay - 1) |
8375 ((crtc_vtotal - 1) << 16));
8376 I915_WRITE(VBLANK(cpu_transcoder),
8377 (adjusted_mode->crtc_vblank_start - 1) |
8378 ((crtc_vblank_end - 1) << 16));
8379 I915_WRITE(VSYNC(cpu_transcoder),
8380 (adjusted_mode->crtc_vsync_start - 1) |
8381 ((adjusted_mode->crtc_vsync_end - 1) << 16));
8383 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
8384 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
8385 * documented on the DDI_FUNC_CTL register description, EDP Input Select
8387 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
8388 (pipe == PIPE_B || pipe == PIPE_C))
8389 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
/*
 * Program PIPESRC: width in the high 16 bits, height in the low bits,
 * both minus one per hardware encoding.
 */
8393 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8395 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8396 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8397 enum pipe pipe = crtc->pipe;
8399 /* pipesrc controls the size that is scaled from, which should
8400 * always be the user's requested size.
8402 I915_WRITE(PIPESRC(pipe),
8403 ((crtc_state->pipe_src_w - 1) << 16) |
8404 (crtc_state->pipe_src_h - 1));
/*
 * Read back whether the pipe is running interlaced, using the
 * PIPECONF interlace field (HSW+ layout differs from older gens).
 */
8407 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8409 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8410 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8412 if (IS_GEN(dev_priv, 2))
8415 if (INTEL_GEN(dev_priv) >= 9 ||
8416 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8417 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8419 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
/*
 * Read the transcoder timing registers back into
 * pipe_config->hw.adjusted_mode, undoing the "value - 1" hardware
 * encoding. HBLANK/VBLANK are skipped for DSI transcoders. For
 * interlaced pipes, vtotal/vblank_end are bumped to compensate for
 * the halfline the hardware adds automatically.
 */
8422 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8423 struct intel_crtc_state *pipe_config)
8425 struct drm_device *dev = crtc->base.dev;
8426 struct drm_i915_private *dev_priv = to_i915(dev);
8427 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8430 tmp = I915_READ(HTOTAL(cpu_transcoder));
8431 pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8432 pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8434 if (!transcoder_is_dsi(cpu_transcoder)) {
8435 tmp = I915_READ(HBLANK(cpu_transcoder));
8436 pipe_config->hw.adjusted_mode.crtc_hblank_start =
8438 pipe_config->hw.adjusted_mode.crtc_hblank_end =
8439 ((tmp >> 16) & 0xffff) + 1;
8441 tmp = I915_READ(HSYNC(cpu_transcoder));
8442 pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8443 pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8445 tmp = I915_READ(VTOTAL(cpu_transcoder));
8446 pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8447 pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8449 if (!transcoder_is_dsi(cpu_transcoder)) {
8450 tmp = I915_READ(VBLANK(cpu_transcoder));
8451 pipe_config->hw.adjusted_mode.crtc_vblank_start =
8453 pipe_config->hw.adjusted_mode.crtc_vblank_end =
8454 ((tmp >> 16) & 0xffff) + 1;
8456 tmp = I915_READ(VSYNC(cpu_transcoder));
8457 pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8458 pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8460 if (intel_pipe_is_interlaced(pipe_config)) {
8461 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8462 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
8463 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
/*
 * Read PIPESRC back into pipe_src_w/h (width in the high 16 bits,
 * height in the low bits, both stored minus one) and mirror the
 * values into hw.mode h/vdisplay.
 */
8467 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8468 struct intel_crtc_state *pipe_config)
8470 struct drm_device *dev = crtc->base.dev;
8471 struct drm_i915_private *dev_priv = to_i915(dev);
8474 tmp = I915_READ(PIPESRC(crtc->pipe));
8475 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8476 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8478 pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8479 pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
/*
 * Populate a drm_display_mode from the crtc_* timing fields of
 * pipe_config->hw.adjusted_mode, then derive hsync/vrefresh and the
 * mode name via the drm helpers.
 */
8482 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8483 struct intel_crtc_state *pipe_config)
8485 mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8486 mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8487 mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8488 mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8490 mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8491 mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8492 mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8493 mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8495 mode->flags = pipe_config->hw.adjusted_mode.flags;
8496 mode->type = DRM_MODE_TYPE_DRIVER;
8498 mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8500 mode->hsync = drm_mode_hsync(mode);
8501 mode->vrefresh = drm_mode_vrefresh(mode);
8502 drm_mode_set_name(mode);
/*
 * Build and write the PIPECONF value for GMCH-era pipes: enable bit
 * carry-over on i830, double-wide, bpc/dither (G4X/VLV/CHV only),
 * interlace mode, limited color range (VLV/CHV), and gamma mode.
 */
8505 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8507 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8508 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8513 /* we keep both pipes enabled on 830 */
8514 if (IS_I830(dev_priv))
8515 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8517 if (crtc_state->double_wide)
8518 pipeconf |= PIPECONF_DOUBLE_WIDE;
8520 /* only g4x and later have fancy bpc/dither controls */
8521 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8522 IS_CHERRYVIEW(dev_priv)) {
8523 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8524 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8525 pipeconf |= PIPECONF_DITHER_EN |
8526 PIPECONF_DITHER_TYPE_SP;
8528 switch (crtc_state->pipe_bpp) {
8530 pipeconf |= PIPECONF_6BPC;
8533 pipeconf |= PIPECONF_8BPC;
8536 pipeconf |= PIPECONF_10BPC;
8539 /* Case prevented by intel_choose_pipe_bpp_dither. */
8544 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8545 if (INTEL_GEN(dev_priv) < 4 ||
8546 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8547 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8549 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8551 pipeconf |= PIPECONF_PROGRESSIVE;
8554 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8555 crtc_state->limited_color_range)
8556 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8558 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8560 pipeconf |= PIPECONF_FRAME_START_DELAY(0);
8562 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8563 POSTING_READ(PIPECONF(crtc->pipe));
/*
 * Pick a PLL limit table for gen2 (LVDS / DVO / DAC), find dividers
 * for the requested port clock unless the caller fixed the clock
 * already, then compute the DPLL register values.
 */
8566 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8567 struct intel_crtc_state *crtc_state)
8569 struct drm_device *dev = crtc->base.dev;
8570 struct drm_i915_private *dev_priv = to_i915(dev);
8571 const struct intel_limit *limit;
8574 memset(&crtc_state->dpll_hw_state, 0,
8575 sizeof(crtc_state->dpll_hw_state));
8577 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8578 if (intel_panel_use_ssc(dev_priv)) {
8579 refclk = dev_priv->vbt.lvds_ssc_freq;
8580 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8583 limit = &intel_limits_i8xx_lvds;
8584 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8585 limit = &intel_limits_i8xx_dvo;
8587 limit = &intel_limits_i8xx_dac;
8590 if (!crtc_state->clock_set &&
8591 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8592 refclk, NULL, &crtc_state->dpll)) {
8593 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8597 i8xx_compute_dpll(crtc, crtc_state, NULL);
/*
 * G4X variant of the clock computation: selects among single/dual
 * channel LVDS, HDMI/analog, SDVO, or the generic i9xx limit table,
 * then finds dividers and fills in the DPLL state.
 */
8602 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8603 struct intel_crtc_state *crtc_state)
8605 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8606 const struct intel_limit *limit;
8609 memset(&crtc_state->dpll_hw_state, 0,
8610 sizeof(crtc_state->dpll_hw_state));
8612 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8613 if (intel_panel_use_ssc(dev_priv)) {
8614 refclk = dev_priv->vbt.lvds_ssc_freq;
8615 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8618 if (intel_is_dual_link_lvds(dev_priv))
8619 limit = &intel_limits_g4x_dual_channel_lvds;
8621 limit = &intel_limits_g4x_single_channel_lvds;
8622 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8623 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8624 limit = &intel_limits_g4x_hdmi;
8625 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8626 limit = &intel_limits_g4x_sdvo;
8628 /* The option is for other outputs */
8629 limit = &intel_limits_i9xx_sdvo;
8632 if (!crtc_state->clock_set &&
8633 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8634 refclk, NULL, &crtc_state->dpll)) {
8635 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8639 i9xx_compute_dpll(crtc, crtc_state, NULL);
/*
 * Pineview variant: LVDS vs SDVO limit tables, Pineview-specific
 * divider search, then the shared i9xx DPLL computation.
 */
8644 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8645 struct intel_crtc_state *crtc_state)
8647 struct drm_device *dev = crtc->base.dev;
8648 struct drm_i915_private *dev_priv = to_i915(dev);
8649 const struct intel_limit *limit;
8652 memset(&crtc_state->dpll_hw_state, 0,
8653 sizeof(crtc_state->dpll_hw_state));
8655 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8656 if (intel_panel_use_ssc(dev_priv)) {
8657 refclk = dev_priv->vbt.lvds_ssc_freq;
8658 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8661 limit = &intel_limits_pineview_lvds;
8663 limit = &intel_limits_pineview_sdvo;
8666 if (!crtc_state->clock_set &&
8667 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8668 refclk, NULL, &crtc_state->dpll)) {
8669 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8673 i9xx_compute_dpll(crtc, crtc_state, NULL);
/*
 * Generic gen3/gen4 variant: LVDS vs SDVO limit tables, the i9xx
 * divider search, then the shared i9xx DPLL computation.
 */
8678 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8679 struct intel_crtc_state *crtc_state)
8681 struct drm_device *dev = crtc->base.dev;
8682 struct drm_i915_private *dev_priv = to_i915(dev);
8683 const struct intel_limit *limit;
8686 memset(&crtc_state->dpll_hw_state, 0,
8687 sizeof(crtc_state->dpll_hw_state));
8689 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8690 if (intel_panel_use_ssc(dev_priv)) {
8691 refclk = dev_priv->vbt.lvds_ssc_freq;
8692 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8695 limit = &intel_limits_i9xx_lvds;
8697 limit = &intel_limits_i9xx_sdvo;
8700 if (!crtc_state->clock_set &&
8701 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8702 refclk, NULL, &crtc_state->dpll)) {
8703 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8707 i9xx_compute_dpll(crtc, crtc_state, NULL);
/*
 * Cherryview clock computation: fixed 100 MHz reference, single CHV
 * limit table, then chv_compute_dpll() fills the hw state.
 */
8712 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8713 struct intel_crtc_state *crtc_state)
8715 int refclk = 100000;
8716 const struct intel_limit *limit = &intel_limits_chv;
8718 memset(&crtc_state->dpll_hw_state, 0,
8719 sizeof(crtc_state->dpll_hw_state));
8721 if (!crtc_state->clock_set &&
8722 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8723 refclk, NULL, &crtc_state->dpll)) {
8724 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8728 chv_compute_dpll(crtc, crtc_state);
/*
 * Valleyview clock computation: fixed 100 MHz reference, single VLV
 * limit table, then vlv_compute_dpll() fills the hw state.
 */
8733 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8734 struct intel_crtc_state *crtc_state)
8736 int refclk = 100000;
8737 const struct intel_limit *limit = &intel_limits_vlv;
8739 memset(&crtc_state->dpll_hw_state, 0,
8740 sizeof(crtc_state->dpll_hw_state));
8742 if (!crtc_state->clock_set &&
8743 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8744 refclk, NULL, &crtc_state->dpll)) {
8745 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8749 vlv_compute_dpll(crtc, crtc_state);
/* Whether this GMCH platform has a panel fitter (i830 excluded). */
8754 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8756 if (IS_I830(dev_priv))
8759 return INTEL_GEN(dev_priv) >= 4 ||
8760 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
/*
 * Read back the GMCH panel fitter state into gmch_pfit, but only if
 * the fitter exists, is enabled, and is attached to this crtc's pipe
 * (pre-gen4 the single fitter serves pipe B only).
 */
8763 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8764 struct intel_crtc_state *pipe_config)
8766 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8769 if (!i9xx_has_pfit(dev_priv))
8772 tmp = I915_READ(PFIT_CONTROL);
8773 if (!(tmp & PFIT_ENABLE))
8776 /* Check whether the pfit is attached to our pipe. */
8777 if (INTEL_GEN(dev_priv) < 4) {
8778 if (crtc->pipe != PIPE_B)
8781 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8785 pipe_config->gmch_pfit.control = tmp;
8786 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
/*
 * Read the VLV PLL divider register via DPIO, unpack n/m1/m2/p1/p2,
 * and recompute port_clock from the fixed 100 MHz reference. Skipped
 * for DSI, where the DPLL is not used.
 */
8789 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8790 struct intel_crtc_state *pipe_config)
8792 struct drm_device *dev = crtc->base.dev;
8793 struct drm_i915_private *dev_priv = to_i915(dev);
8794 enum pipe pipe = crtc->pipe;
8797 int refclk = 100000;
8799 /* In case of DSI, DPLL will not be used */
8800 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8803 vlv_dpio_get(dev_priv);
8804 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8805 vlv_dpio_put(dev_priv);
8807 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8808 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8809 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8810 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8811 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8813 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
/*
 * Reconstruct the firmware/BIOS-programmed primary plane framebuffer
 * configuration (tiling, rotation, format, base, stride, size) from
 * the plane registers, allocating an intel_framebuffer to describe it.
 * Bails out if the plane is not currently enabled.
 */
8817 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8818 struct intel_initial_plane_config *plane_config)
8820 struct drm_device *dev = crtc->base.dev;
8821 struct drm_i915_private *dev_priv = to_i915(dev);
8822 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8823 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8825 u32 val, base, offset;
8826 int fourcc, pixel_format;
8827 unsigned int aligned_height;
8828 struct drm_framebuffer *fb;
8829 struct intel_framebuffer *intel_fb;
8831 if (!plane->get_hw_state(plane, &pipe))
8834 WARN_ON(pipe != crtc->pipe);
8836 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8838 DRM_DEBUG_KMS("failed to alloc fb\n");
8842 fb = &intel_fb->base;
8846 val = I915_READ(DSPCNTR(i9xx_plane));
8848 if (INTEL_GEN(dev_priv) >= 4) {
8849 if (val & DISPPLANE_TILED) {
8850 plane_config->tiling = I915_TILING_X;
8851 fb->modifier = I915_FORMAT_MOD_X_TILED;
8854 if (val & DISPPLANE_ROTATE_180)
8855 plane_config->rotation = DRM_MODE_ROTATE_180;
/* CHV pipe B can additionally mirror horizontally */
8858 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8859 val & DISPPLANE_MIRROR)
8860 plane_config->rotation |= DRM_MODE_REFLECT_X;
8862 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8863 fourcc = i9xx_format_to_fourcc(pixel_format);
8864 fb->format = drm_format_info(fourcc);
/* Surface base/offset registers differ by generation */
8866 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8867 offset = I915_READ(DSPOFFSET(i9xx_plane));
8868 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8869 } else if (INTEL_GEN(dev_priv) >= 4) {
8870 if (plane_config->tiling)
8871 offset = I915_READ(DSPTILEOFF(i9xx_plane));
8873 offset = I915_READ(DSPLINOFF(i9xx_plane));
8874 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8876 base = I915_READ(DSPADDR(i9xx_plane));
8878 plane_config->base = base;
8880 val = I915_READ(PIPESRC(pipe));
8881 fb->width = ((val >> 16) & 0xfff) + 1;
8882 fb->height = ((val >> 0) & 0xfff) + 1;
8884 val = I915_READ(DSPSTRIDE(i9xx_plane));
8885 fb->pitches[0] = val & 0xffffffc0;
8887 aligned_height = intel_fb_align_height(fb, 0, fb->height);
8889 plane_config->size = fb->pitches[0] * aligned_height;
8891 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8892 crtc->base.name, plane->base.name, fb->width, fb->height,
8893 fb->format->cpp[0] * 8, base, fb->pitches[0],
8894 plane_config->size);
8896 plane_config->fb = intel_fb;
/*
 * Read the CHV PLL divider registers via DPIO, reassemble the m2
 * integer/fraction split (integer in DW0, 22-bit fraction in DW2 when
 * enabled by DW3), and recompute port_clock from the fixed 100 MHz
 * reference. Skipped for DSI, where the DPLL is not used.
 */
8899 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8900 struct intel_crtc_state *pipe_config)
8902 struct drm_device *dev = crtc->base.dev;
8903 struct drm_i915_private *dev_priv = to_i915(dev);
8904 enum pipe pipe = crtc->pipe;
8905 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8907 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8908 int refclk = 100000;
8910 /* In case of DSI, DPLL will not be used */
8911 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8914 vlv_dpio_get(dev_priv);
8915 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8916 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8917 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8918 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8919 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8920 vlv_dpio_put(dev_priv);
8922 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8923 clock.m2 = (pll_dw0 & 0xff) << 22;
8924 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8925 clock.m2 |= pll_dw2 & 0x3fffff;
8926 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8927 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8928 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8930 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
/*
 * Decode the pipe's output color format (RGB / YCbCr 4:4:4 /
 * YCbCr 4:2:0) from the PIPEMISC register on BDW+.
 */
8933 static enum intel_output_format
8934 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
8936 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8939 tmp = I915_READ(PIPEMISC(crtc->pipe));
8941 if (tmp & PIPEMISC_YUV420_ENABLE) {
8942 /* We support 4:2:0 in full blend mode only */
8943 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
8945 return INTEL_OUTPUT_FORMAT_YCBCR420;
8946 } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8947 return INTEL_OUTPUT_FORMAT_YCBCR444;
8949 return INTEL_OUTPUT_FORMAT_RGB;
/*
 * Read the gamma / pipe-CSC enable bits for this pipe's primary plane
 * from DSPCNTR into crtc_state (CSC bit is meaningless on GMCH).
 */
8953 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8955 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8956 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8957 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8958 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8961 tmp = I915_READ(DSPCNTR(i9xx_plane));
8963 if (tmp & DISPPLANE_GAMMA_ENABLE)
8964 crtc_state->gamma_enable = true;
8966 if (!HAS_GMCH(dev_priv) &&
8967 tmp & DISPPLANE_PIPE_CSC_ENABLE)
8968 crtc_state->csc_enable = true;
/*
 * Read back the full hardware pipe state for GMCH-class hardware into
 * pipe_config: PIPECONF (bpc, color range, gamma, double-wide),
 * timings, source size, panel fitter, pixel multiplier, and DPLL
 * state, plus the platform-appropriate clock readout. Returns whether
 * the pipe is enabled; takes (and releases) the pipe power domain
 * reference for the duration of the readout.
 */
8971 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8972 struct intel_crtc_state *pipe_config)
8974 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8975 enum intel_display_power_domain power_domain;
8976 intel_wakeref_t wakeref;
8980 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8981 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8985 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8986 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8987 pipe_config->shared_dpll = NULL;
8988 pipe_config->master_transcoder = INVALID_TRANSCODER;
8992 tmp = I915_READ(PIPECONF(crtc->pipe));
8993 if (!(tmp & PIPECONF_ENABLE))
8996 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8997 IS_CHERRYVIEW(dev_priv)) {
8998 switch (tmp & PIPECONF_BPC_MASK) {
9000 pipe_config->pipe_bpp = 18;
9003 pipe_config->pipe_bpp = 24;
9005 case PIPECONF_10BPC:
9006 pipe_config->pipe_bpp = 30;
9013 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9014 (tmp & PIPECONF_COLOR_RANGE_SELECT))
9015 pipe_config->limited_color_range = true;
9017 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
9018 PIPECONF_GAMMA_MODE_SHIFT;
9020 if (IS_CHERRYVIEW(dev_priv))
9021 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
9023 i9xx_get_pipe_color_config(pipe_config);
9024 intel_color_get_config(pipe_config);
9026 if (INTEL_GEN(dev_priv) < 4)
9027 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
9029 intel_get_pipe_timings(crtc, pipe_config);
9030 intel_get_pipe_src_size(crtc, pipe_config);
9032 i9xx_get_pfit_config(crtc, pipe_config);
9034 if (INTEL_GEN(dev_priv) >= 4) {
9035 /* No way to read it out on pipes B and C */
9036 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
9037 tmp = dev_priv->chv_dpll_md[crtc->pipe];
9039 tmp = I915_READ(DPLL_MD(crtc->pipe));
9040 pipe_config->pixel_multiplier =
9041 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
9042 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
9043 pipe_config->dpll_hw_state.dpll_md = tmp;
9044 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
9045 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
9046 tmp = I915_READ(DPLL(crtc->pipe));
9047 pipe_config->pixel_multiplier =
9048 ((tmp & SDVO_MULTIPLIER_MASK)
9049 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
9051 /* Note that on i915G/GM the pixel multiplier is in the sdvo
9052 * port and will be fixed up in the encoder->get_config
9054 pipe_config->pixel_multiplier = 1;
9056 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
9057 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
9058 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
9059 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
9061 /* Mask out read-only status bits. */
9062 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
9063 DPLL_PORTC_READY_MASK |
9064 DPLL_PORTB_READY_MASK);
9067 if (IS_CHERRYVIEW(dev_priv))
9068 chv_crtc_clock_get(crtc, pipe_config);
9069 else if (IS_VALLEYVIEW(dev_priv))
9070 vlv_crtc_clock_get(crtc, pipe_config);
9072 i9xx_crtc_clock_get(crtc, pipe_config);
9075 * Normally the dotclock is filled in by the encoder .get_config()
9076 * but in case the pipe is enabled w/o any ports we need a sane
9079 pipe_config->hw.adjusted_mode.crtc_clock =
9080 pipe_config->port_clock / pipe_config->pixel_multiplier;
9085 intel_display_power_put(dev_priv, power_domain, wakeref);
9090 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
9092 struct intel_encoder *encoder;
9095 bool has_lvds = false;
9096 bool has_cpu_edp = false;
9097 bool has_panel = false;
9098 bool has_ck505 = false;
9099 bool can_ssc = false;
9100 bool using_ssc_source = false;
9102 /* We need to take the global config into account */
9103 for_each_intel_encoder(&dev_priv->drm, encoder) {
9104 switch (encoder->type) {
9105 case INTEL_OUTPUT_LVDS:
9109 case INTEL_OUTPUT_EDP:
9111 if (encoder->port == PORT_A)
9119 if (HAS_PCH_IBX(dev_priv)) {
9120 has_ck505 = dev_priv->vbt.display_clock_mode;
9121 can_ssc = has_ck505;
9127 /* Check if any DPLLs are using the SSC source */
9128 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9129 u32 temp = I915_READ(PCH_DPLL(i));
9131 if (!(temp & DPLL_VCO_ENABLE))
9134 if ((temp & PLL_REF_INPUT_MASK) ==
9135 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
9136 using_ssc_source = true;
9141 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
9142 has_panel, has_lvds, has_ck505, using_ssc_source);
9144 /* Ironlake: try to setup display ref clock before DPLL
9145 * enabling. This is only under driver's control after
9146 * PCH B stepping, previous chipset stepping should be
9147 * ignoring this setting.
9149 val = I915_READ(PCH_DREF_CONTROL);
9151 /* As we must carefully and slowly disable/enable each source in turn,
9152 * compute the final state we want first and check if we need to
9153 * make any changes at all.
9156 final &= ~DREF_NONSPREAD_SOURCE_MASK;
9158 final |= DREF_NONSPREAD_CK505_ENABLE;
9160 final |= DREF_NONSPREAD_SOURCE_ENABLE;
9162 final &= ~DREF_SSC_SOURCE_MASK;
9163 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9164 final &= ~DREF_SSC1_ENABLE;
9167 final |= DREF_SSC_SOURCE_ENABLE;
9169 if (intel_panel_use_ssc(dev_priv) && can_ssc)
9170 final |= DREF_SSC1_ENABLE;
9173 if (intel_panel_use_ssc(dev_priv) && can_ssc)
9174 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9176 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9178 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9179 } else if (using_ssc_source) {
9180 final |= DREF_SSC_SOURCE_ENABLE;
9181 final |= DREF_SSC1_ENABLE;
9187 /* Always enable nonspread source */
9188 val &= ~DREF_NONSPREAD_SOURCE_MASK;
9191 val |= DREF_NONSPREAD_CK505_ENABLE;
9193 val |= DREF_NONSPREAD_SOURCE_ENABLE;
9196 val &= ~DREF_SSC_SOURCE_MASK;
9197 val |= DREF_SSC_SOURCE_ENABLE;
9199 /* SSC must be turned on before enabling the CPU output */
9200 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9201 DRM_DEBUG_KMS("Using SSC on panel\n");
9202 val |= DREF_SSC1_ENABLE;
9204 val &= ~DREF_SSC1_ENABLE;
9206 /* Get SSC going before enabling the outputs */
9207 I915_WRITE(PCH_DREF_CONTROL, val);
9208 POSTING_READ(PCH_DREF_CONTROL);
9211 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9213 /* Enable CPU source on CPU attached eDP */
9215 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9216 DRM_DEBUG_KMS("Using SSC on eDP\n");
9217 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9219 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9221 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9223 I915_WRITE(PCH_DREF_CONTROL, val);
9224 POSTING_READ(PCH_DREF_CONTROL);
9227 DRM_DEBUG_KMS("Disabling CPU source output\n");
9229 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9231 /* Turn off CPU output */
9232 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9234 I915_WRITE(PCH_DREF_CONTROL, val);
9235 POSTING_READ(PCH_DREF_CONTROL);
9238 if (!using_ssc_source) {
9239 DRM_DEBUG_KMS("Disabling SSC source\n");
9241 /* Turn off the SSC source */
9242 val &= ~DREF_SSC_SOURCE_MASK;
9243 val |= DREF_SSC_SOURCE_DISABLE;
9246 val &= ~DREF_SSC1_ENABLE;
9248 I915_WRITE(PCH_DREF_CONTROL, val);
9249 POSTING_READ(PCH_DREF_CONTROL);
9254 BUG_ON(val != final);
/*
 * Pulse the FDI mPHY reset: assert FDI_MPHY_IOSFSB_RESET_CTL in
 * SOUTH_CHICKEN2, wait (up to 100us) for the status bit to latch, then
 * de-assert and wait for the status bit to clear. Timeouts are logged
 * but not propagated.
 */
9257 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
9261 tmp = I915_READ(SOUTH_CHICKEN2);
9262 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
9263 I915_WRITE(SOUTH_CHICKEN2, tmp);
9265 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
9266 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
9267 DRM_ERROR("FDI mPHY reset assert timeout\n");
9269 tmp = I915_READ(SOUTH_CHICKEN2);
9270 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
9271 I915_WRITE(SOUTH_CHICKEN2, tmp);
9273 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
9274 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
9275 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
9278 /* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers through the sideband interface
 * (SBI_MPHY). Each pair of addresses (0x2xxx / 0x21xx etc.) covers the
 * two FDI lanes; the magic offsets/values come from the workaround above.
 * Must be called with the mPHY out of reset (see lpt_reset_fdi_mphy()).
 */
9279 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
9283 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
9284 tmp &= ~(0xFF << 24);
9285 tmp |= (0x12 << 24);
9286 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
9288 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
9290 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
9292 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
9294 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
9296 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
9297 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9298 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
9300 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
9301 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9302 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
9304 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
9307 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
9309 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
9312 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
9314 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
9317 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
9319 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
9322 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
9324 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
9325 tmp &= ~(0xFF << 16);
9326 tmp |= (0x1C << 16);
9327 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
9329 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
9330 tmp &= ~(0xFF << 16);
9331 tmp |= (0x1C << 16);
9332 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
9334 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
9336 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
9338 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
9340 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
9342 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
9343 tmp &= ~(0xF << 28);
9345 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
9347 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
9348 tmp &= ~(0xF << 28);
9350 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
9353 /* Implements 3 different sequences from BSpec chapter "Display iCLK
9354 * Programming" based on the parameters passed:
9355 * - Sequence to enable CLKOUT_DP
9356 * - Sequence to enable CLKOUT_DP without spread
9357 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
/*
 * @with_spread: enable spread spectrum on the output clock
 * @with_fdi: also reset+program the FDI mPHY (implies @with_spread,
 *            and not valid on LP PCH which has no FDI)
 * All sideband accesses are serialized under dev_priv->sb_lock.
 */
9359 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
9360 bool with_spread, bool with_fdi)
9364 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
9366 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
9367 with_fdi, "LP PCH doesn't have FDI\n"))
9370 mutex_lock(&dev_priv->sb_lock);
/* Un-gate the SSC block but keep the path in bypass (PATHALT) */
9372 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9373 tmp &= ~SBI_SSCCTL_DISABLE;
9374 tmp |= SBI_SSCCTL_PATHALT;
9375 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
/* With spread requested: drop PATHALT so the spread path is active */
9380 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9381 tmp &= ~SBI_SSCCTL_PATHALT;
9382 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9385 lpt_reset_fdi_mphy(dev_priv);
9386 lpt_program_fdi_mphy(dev_priv);
/* GEN0/DBUFF0 register name differs between LP and non-LP PCH */
9390 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9391 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9392 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9393 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9395 mutex_unlock(&dev_priv->sb_lock);
9398 /* Sequence to disable CLKOUT_DP */
/*
 * Reverse of lpt_enable_clkout_dp(): disable the output buffer, then put
 * the SSC path back into bypass (PATHALT) before gating it (DISABLE).
 * Serialized under dev_priv->sb_lock.
 */
9399 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
9403 mutex_lock(&dev_priv->sb_lock);
9405 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9406 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9407 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9408 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9410 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9411 if (!(tmp & SBI_SSCCTL_DISABLE)) {
9412 if (!(tmp & SBI_SSCCTL_PATHALT)) {
9413 tmp |= SBI_SSCCTL_PATHALT;
9414 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9417 tmp |= SBI_SSCCTL_DISABLE;
9418 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9421 mutex_unlock(&dev_priv->sb_lock);
/* Map a bend amount in "steps" (-50..50, multiples of 5) to a table index
 * 0..20: index = (50 + steps) / 5. */
9424 #define BEND_IDX(steps) ((50 + (steps)) / 5)
/* SSCDIVINTPHASE values for each clock-bend step; indexed via BEND_IDX(). */
9426 static const u16 sscdivintphase[] = {
9427 [BEND_IDX( 50)] = 0x3B23,
9428 [BEND_IDX( 45)] = 0x3B23,
9429 [BEND_IDX( 40)] = 0x3C23,
9430 [BEND_IDX( 35)] = 0x3C23,
9431 [BEND_IDX( 30)] = 0x3D23,
9432 [BEND_IDX( 25)] = 0x3D23,
9433 [BEND_IDX( 20)] = 0x3E23,
9434 [BEND_IDX( 15)] = 0x3E23,
9435 [BEND_IDX( 10)] = 0x3F23,
9436 [BEND_IDX( 5)] = 0x3F23,
9437 [BEND_IDX( 0)] = 0x0025,
9438 [BEND_IDX( -5)] = 0x0025,
9439 [BEND_IDX(-10)] = 0x0125,
9440 [BEND_IDX(-15)] = 0x0125,
9441 [BEND_IDX(-20)] = 0x0225,
9442 [BEND_IDX(-25)] = 0x0225,
9443 [BEND_IDX(-30)] = 0x0325,
9444 [BEND_IDX(-35)] = 0x0325,
9445 [BEND_IDX(-40)] = 0x0425,
9446 [BEND_IDX(-45)] = 0x0425,
9447 [BEND_IDX(-50)] = 0x0525,
9452 * steps -50 to 50 inclusive, in steps of 5
9453 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9454 * change in clock period = -(steps / 10) * 5.787 ps
/*
 * Apply a clock bend to CLKOUT_DP via the sideband SSC dither/divider
 * phase registers. Validates that @steps is a multiple of 5 in range.
 */
9456 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9459 int idx = BEND_IDX(steps);
9461 if (WARN_ON(steps % 5 != 0))
9464 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9467 mutex_lock(&dev_priv->sb_lock);
/* Odd multiples of 5 need the half-step dither phase programmed */
9469 if (steps % 10 != 0)
9473 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9475 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9477 tmp |= sscdivintphase[idx];
9478 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9480 mutex_unlock(&dev_priv->sb_lock);
/*
 * Report whether the (enabled) SPLL is currently referenced to the PCH
 * SSC clock: either the muxed-SSC reference with the CPU SSC fuse
 * disabled, or (BDW only) the explicit PCH SSC reference select.
 */
9485 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9487 u32 fuse_strap = I915_READ(FUSE_STRAP);
9488 u32 ctl = I915_READ(SPLL_CTL);
9490 if ((ctl & SPLL_PLL_ENABLE) == 0)
9493 if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9494 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9497 if (IS_BROADWELL(dev_priv) &&
9498 (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
/*
 * As spll_uses_pch_ssc() but for WRPLL @id: true when the enabled WRPLL
 * references the PCH SSC clock, either directly or via the muxed SSC
 * reference on BDW/HSW-ULT with the CPU SSC fuse disabled.
 */
9504 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9505 enum intel_dpll_id id)
9507 u32 fuse_strap = I915_READ(FUSE_STRAP);
9508 u32 ctl = I915_READ(WRPLL_CTL(id));
9510 if ((ctl & WRPLL_PLL_ENABLE) == 0)
9513 if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9516 if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9517 (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9518 (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
/*
 * Initialize the LPT PCH reference clock. Detects whether any analog
 * (FDI) output exists, records in dev_priv->pch_ssc_use which PLLs the
 * BIOS left running on the PCH SSC reference, and then either enables
 * CLKOUT_DP (with spread + FDI programming) or disables it.
 */
9524 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9526 struct intel_encoder *encoder;
9527 bool has_fdi = false;
9529 for_each_intel_encoder(&dev_priv->drm, encoder) {
9530 switch (encoder->type) {
9531 case INTEL_OUTPUT_ANALOG:
9540 * The BIOS may have decided to use the PCH SSC
9541 * reference so we must not disable it until the
9542 * relevant PLLs have stopped relying on it. We'll
9543 * just leave the PCH SSC reference enabled in case
9544 * any active PLL is using it. It will get disabled
9545 * after runtime suspend if we don't have FDI.
9547 * TODO: Move the whole reference clock handling
9548 * to the modeset sequence proper so that we can
9549 * actually enable/disable/reconfigure these things
9550 * safely. To do that we need to introduce a real
9551 * clock hierarchy. That would also allow us to do
9552 * clock bending finally.
9554 dev_priv->pch_ssc_use = 0;
9556 if (spll_uses_pch_ssc(dev_priv)) {
9557 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9558 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9561 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9562 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9563 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9566 if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9567 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9568 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
/* Some PLL still on the PCH SSC reference: leave things as-is */
9571 if (dev_priv->pch_ssc_use)
9575 lpt_bend_clkout_dp(dev_priv, 0);
9576 lpt_enable_clkout_dp(dev_priv, true, true);
9578 lpt_disable_clkout_dp(dev_priv);
9583 * Initialize reference clocks when the driver loads
/* Dispatch to the PCH-generation-specific refclk init (IBX/CPT vs LPT). */
9585 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9587 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9588 ironlake_init_pch_refclk(dev_priv);
9589 else if (HAS_PCH_LPT(dev_priv))
9590 lpt_init_pch_refclk(dev_priv);
/*
 * Program PIPECONF for an ILK-class pipe from the committed crtc state:
 * bpc, dithering, interlace mode, limited color range, YUV colorspace,
 * gamma mode and frame start delay.
 */
9593 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9595 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9596 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9597 enum pipe pipe = crtc->pipe;
9602 switch (crtc_state->pipe_bpp) {
9604 val |= PIPECONF_6BPC;
9607 val |= PIPECONF_8BPC;
9610 val |= PIPECONF_10BPC;
9613 val |= PIPECONF_12BPC;
9616 /* Case prevented by intel_choose_pipe_bpp_dither. */
9620 if (crtc_state->dither)
9621 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9623 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9624 val |= PIPECONF_INTERLACED_ILK;
9626 val |= PIPECONF_PROGRESSIVE;
9629 * This would end up with an odd purple hue over
9630 * the entire display. Make sure we don't do it.
9632 WARN_ON(crtc_state->limited_color_range &&
9633 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
9635 if (crtc_state->limited_color_range)
9636 val |= PIPECONF_COLOR_RANGE_SELECT;
9638 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9639 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
9641 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9643 val |= PIPECONF_FRAME_START_DELAY(0);
9645 I915_WRITE(PIPECONF(pipe), val);
9646 POSTING_READ(PIPECONF(pipe));
/*
 * Program PIPECONF for HSW+ (per cpu transcoder): only dithering
 * (HSW-only), interlace mode and (HSW-only) YUV colorspace live here;
 * bpc etc. moved to other registers on these platforms.
 */
9649 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9651 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9652 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9653 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9656 if (IS_HASWELL(dev_priv) && crtc_state->dither)
9657 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9659 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9660 val |= PIPECONF_INTERLACED_ILK;
9662 val |= PIPECONF_PROGRESSIVE;
9664 if (IS_HASWELL(dev_priv) &&
9665 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9666 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9668 I915_WRITE(PIPECONF(cpu_transcoder), val);
9669 POSTING_READ(PIPECONF(cpu_transcoder));
/*
 * Program PIPEMISC (BDW+): dither bpc/enable, YUV output colorspace,
 * YCbCr 4:2:0 enable, and (gen11+) HDR precision mode when only HDR
 * planes (plus cursor) are active.
 */
9672 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9674 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9675 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9678 switch (crtc_state->pipe_bpp) {
9680 val |= PIPEMISC_DITHER_6_BPC;
9683 val |= PIPEMISC_DITHER_8_BPC;
9686 val |= PIPEMISC_DITHER_10_BPC;
9689 val |= PIPEMISC_DITHER_12_BPC;
9692 MISSING_CASE(crtc_state->pipe_bpp);
9696 if (crtc_state->dither)
9697 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9699 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9700 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9701 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9703 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9704 val |= PIPEMISC_YUV420_ENABLE |
9705 PIPEMISC_YUV420_MODE_FULL_BLEND;
9707 if (INTEL_GEN(dev_priv) >= 11 &&
9708 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9709 BIT(PLANE_CURSOR))) == 0)
9710 val |= PIPEMISC_HDR_MODE_PRECISION;
9712 I915_WRITE(PIPEMISC(crtc->pipe), val);
/*
 * Read back the pipe bpp from the PIPEMISC dither-bpc field (BDW+).
 * Returns the total pipe bpp implied by the programmed per-channel bpc.
 */
9715 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9717 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9720 tmp = I915_READ(PIPEMISC(crtc->pipe));
9722 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9723 case PIPEMISC_DITHER_6_BPC:
9725 case PIPEMISC_DITHER_8_BPC:
9727 case PIPEMISC_DITHER_10_BPC:
9729 case PIPEMISC_DITHER_12_BPC:
/*
 * Minimum FDI lane count for @target_clock (kHz) at @bpp over a link of
 * @link_bw: bits/sec needed (padded 5% for spread spectrum) divided by
 * per-lane capacity, rounded up.
 */
9737 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9740 * Account for spread spectrum to avoid
9741 * oversubscribing the link. Max center spread
9742 * is 2.5%; use 5% for safety's sake.
9744 u32 bps = target_clock * bpp * 21 / 20;
9745 return DIV_ROUND_UP(bps, link_bw * 8);
/* True when the PLL's effective M is below @factor * N, i.e. the
 * feedback coarse/fine tuning (FB_CB) should be enabled. */
9748 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9750 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
/*
 * Build the ILK DPLL/FP register values from the computed divisors in
 * crtc_state->dpll and store them in crtc_state->dpll_hw_state.
 * @reduced_clock optionally supplies divisors for the reduced (downclock)
 * FP1 programming.
 */
9753 static void ironlake_compute_dpll(struct intel_crtc *crtc,
9754 struct intel_crtc_state *crtc_state,
9755 struct dpll *reduced_clock)
9757 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9761 /* Enable autotuning of the PLL clock (if permissible) */
9763 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9764 if ((intel_panel_use_ssc(dev_priv) &&
9765 dev_priv->vbt.lvds_ssc_freq == 100000) ||
9766 (HAS_PCH_IBX(dev_priv) &&
9767 intel_is_dual_link_lvds(dev_priv)))
9769 } else if (crtc_state->sdvo_tv_clock) {
9773 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9775 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
9778 if (reduced_clock) {
9779 fp2 = i9xx_dpll_compute_fp(reduced_clock);
9781 if (reduced_clock->m < factor * reduced_clock->n)
9789 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
9790 dpll |= DPLLB_MODE_LVDS;
9792 dpll |= DPLLB_MODE_DAC_SERIAL;
9794 dpll |= (crtc_state->pixel_multiplier - 1)
9795 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
9797 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
9798 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
9799 dpll |= DPLL_SDVO_HIGH_SPEED;
9801 if (intel_crtc_has_dp_encoder(crtc_state))
9802 dpll |= DPLL_SDVO_HIGH_SPEED;
9805 * The high speed IO clock is only really required for
9806 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9807 * possible to share the DPLL between CRT and HDMI. Enabling
9808 * the clock needlessly does no real harm, except use up a
9809 * bit of power potentially.
9811 * We'll limit this to IVB with 3 pipes, since it has only two
9812 * DPLLs and so DPLL sharing is the only way to get three pipes
9813 * driving PCH ports at the same time. On SNB we could do this,
9814 * and potentially avoid enabling the second DPLL, but it's not
9815 * clear if it''s a win or loss power wise. No point in doing
9816 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9818 if (INTEL_NUM_PIPES(dev_priv) == 3 &&
9819 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9820 dpll |= DPLL_SDVO_HIGH_SPEED;
9822 /* compute bitmask from p1 value */
9823 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9825 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
9827 switch (crtc_state->dpll.p2) {
9829 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9832 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9835 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9838 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
/* SSC reference only for LVDS panels that opted into spread spectrum */
9842 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9843 intel_panel_use_ssc(dev_priv))
9844 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9846 dpll |= PLL_REF_INPUT_DREFCLK;
9848 dpll |= DPLL_VCO_ENABLE;
9850 crtc_state->dpll_hw_state.dpll = dpll;
9851 crtc_state->dpll_hw_state.fp0 = fp;
9852 crtc_state->dpll_hw_state.fp1 = fp2;
/*
 * Compute the PCH PLL configuration for an ILK crtc: pick the refclk
 * (SSC for LVDS panels that use it, else 120 MHz), select the dividers
 * limit table, find the best divisors, fill dpll_hw_state and reserve a
 * shared DPLL. CPU eDP needs no PCH PLL and returns early.
 */
9855 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9856 struct intel_crtc_state *crtc_state)
9858 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9859 struct intel_atomic_state *state =
9860 to_intel_atomic_state(crtc_state->uapi.state);
9861 const struct intel_limit *limit;
9862 int refclk = 120000;
9864 memset(&crtc_state->dpll_hw_state, 0,
9865 sizeof(crtc_state->dpll_hw_state));
9867 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9868 if (!crtc_state->has_pch_encoder)
9871 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9872 if (intel_panel_use_ssc(dev_priv)) {
9873 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9874 dev_priv->vbt.lvds_ssc_freq);
9875 refclk = dev_priv->vbt.lvds_ssc_freq;
9878 if (intel_is_dual_link_lvds(dev_priv)) {
9879 if (refclk == 100000)
9880 limit = &intel_limits_ironlake_dual_lvds_100m;
9882 limit = &intel_limits_ironlake_dual_lvds;
9884 if (refclk == 100000)
9885 limit = &intel_limits_ironlake_single_lvds_100m;
9887 limit = &intel_limits_ironlake_single_lvds;
9890 limit = &intel_limits_ironlake_dac;
9893 if (!crtc_state->clock_set &&
9894 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9895 refclk, NULL, &crtc_state->dpll)) {
9896 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9900 ironlake_compute_dpll(crtc, crtc_state, NULL);
9902 if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
9903 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9904 pipe_name(crtc->pipe));
/*
 * Read back the link M/N and data M/N (plus TU size) for the PCH
 * transcoder attached to @crtc's pipe into @m_n.
 */
9911 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9912 struct intel_link_m_n *m_n)
9914 struct drm_device *dev = crtc->base.dev;
9915 struct drm_i915_private *dev_priv = to_i915(dev);
9916 enum pipe pipe = crtc->pipe;
9918 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9919 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9920 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9922 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9923 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9924 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/*
 * Read back link/data M/N for a CPU transcoder. On gen5+ the values live
 * in per-transcoder registers (and the alternate M2/N2 set is also read
 * when the transcoder supports it and @m2_n2 is supplied); on older
 * platforms the G4X per-pipe registers are used instead.
 */
9927 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9928 enum transcoder transcoder,
9929 struct intel_link_m_n *m_n,
9930 struct intel_link_m_n *m2_n2)
9932 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9933 enum pipe pipe = crtc->pipe;
9935 if (INTEL_GEN(dev_priv) >= 5) {
9936 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9937 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9938 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9940 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9941 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9942 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9944 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
9945 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9946 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9947 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9949 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9950 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9951 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9954 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9955 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9956 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9958 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9959 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9960 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/*
 * Read back the DP link M/N values into @pipe_config, choosing the PCH
 * or CPU transcoder path based on has_pch_encoder.
 */
9964 void intel_dp_get_m_n(struct intel_crtc *crtc,
9965 struct intel_crtc_state *pipe_config)
9967 if (pipe_config->has_pch_encoder)
9968 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9970 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9971 &pipe_config->dp_m_n,
9972 &pipe_config->dp_m2_n2);
/* Read back the FDI link M/N values (no M2/N2 set on FDI). */
9975 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9976 struct intel_crtc_state *pipe_config)
9978 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9979 &pipe_config->fdi_m_n, NULL);
/*
 * Read back panel-fitter (pipe scaler) state on SKL+: find a scaler on
 * this pipe that is enabled and bound to the pipe (no plane selected),
 * record its position/size and mark the crtc scaler user bit.
 */
9982 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9983 struct intel_crtc_state *pipe_config)
9985 struct drm_device *dev = crtc->base.dev;
9986 struct drm_i915_private *dev_priv = to_i915(dev);
9987 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9992 /* find scaler attached to this pipe */
9993 for (i = 0; i < crtc->num_scalers; i++) {
9994 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9995 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9997 pipe_config->pch_pfit.enabled = true;
9998 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9999 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
10000 scaler_state->scalers[i].in_use = true;
10005 scaler_state->scaler_id = id;
10007 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
10009 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
/*
 * Reconstruct the BIOS-programmed primary plane framebuffer config on
 * SKL+ by reading back PLANE_CTL/SURF/OFFSET/SIZE/STRIDE: pixel format,
 * tiling modifier, rotation/reflection, base address, dimensions and
 * pitch, so the boot framebuffer can be inherited.
 */
10014 skylake_get_initial_plane_config(struct intel_crtc *crtc,
10015 struct intel_initial_plane_config *plane_config)
10017 struct drm_device *dev = crtc->base.dev;
10018 struct drm_i915_private *dev_priv = to_i915(dev);
10019 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
10020 enum plane_id plane_id = plane->id;
10022 u32 val, base, offset, stride_mult, tiling, alpha;
10023 int fourcc, pixel_format;
10024 unsigned int aligned_height;
10025 struct drm_framebuffer *fb;
10026 struct intel_framebuffer *intel_fb;
10028 if (!plane->get_hw_state(plane, &pipe))
10031 WARN_ON(pipe != crtc->pipe);
10033 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10035 DRM_DEBUG_KMS("failed to alloc fb\n");
10039 fb = &intel_fb->base;
10043 val = I915_READ(PLANE_CTL(pipe, plane_id));
/* The format field mask changed on gen11 */
10045 if (INTEL_GEN(dev_priv) >= 11)
10046 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
10048 pixel_format = val & PLANE_CTL_FORMAT_MASK;
/* Alpha mode moved to PLANE_COLOR_CTL on GLK/gen10+ */
10050 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
10051 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
10052 alpha &= PLANE_COLOR_ALPHA_MASK;
10054 alpha = val & PLANE_CTL_ALPHA_MASK;
10057 fourcc = skl_format_to_fourcc(pixel_format,
10058 val & PLANE_CTL_ORDER_RGBX, alpha);
10059 fb->format = drm_format_info(fourcc);
10061 tiling = val & PLANE_CTL_TILED_MASK;
10063 case PLANE_CTL_TILED_LINEAR:
10064 fb->modifier = DRM_FORMAT_MOD_LINEAR;
10066 case PLANE_CTL_TILED_X:
10067 plane_config->tiling = I915_TILING_X;
10068 fb->modifier = I915_FORMAT_MOD_X_TILED;
10070 case PLANE_CTL_TILED_Y:
10071 plane_config->tiling = I915_TILING_Y;
10072 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
10073 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
10075 fb->modifier = I915_FORMAT_MOD_Y_TILED;
10077 case PLANE_CTL_TILED_YF:
10078 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
10079 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
10081 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
10084 MISSING_CASE(tiling);
10089 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
10090 * while i915 HW rotation is clockwise, thats why this swapping.
10092 switch (val & PLANE_CTL_ROTATE_MASK) {
10093 case PLANE_CTL_ROTATE_0:
10094 plane_config->rotation = DRM_MODE_ROTATE_0;
10096 case PLANE_CTL_ROTATE_90:
10097 plane_config->rotation = DRM_MODE_ROTATE_270;
10099 case PLANE_CTL_ROTATE_180:
10100 plane_config->rotation = DRM_MODE_ROTATE_180;
10102 case PLANE_CTL_ROTATE_270:
10103 plane_config->rotation = DRM_MODE_ROTATE_90;
10107 if (INTEL_GEN(dev_priv) >= 10 &&
10108 val & PLANE_CTL_FLIP_HORIZONTAL)
10109 plane_config->rotation |= DRM_MODE_REFLECT_X;
10111 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
10112 plane_config->base = base;
10114 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
10116 val = I915_READ(PLANE_SIZE(pipe, plane_id));
10117 fb->height = ((val >> 16) & 0xffff) + 1;
10118 fb->width = ((val >> 0) & 0xffff) + 1;
10120 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
10121 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
10122 fb->pitches[0] = (val & 0x3ff) * stride_mult;
10124 aligned_height = intel_fb_align_height(fb, 0, fb->height);
10126 plane_config->size = fb->pitches[0] * aligned_height;
10128 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
10129 crtc->base.name, plane->base.name, fb->width, fb->height,
10130 fb->format->cpp[0] * 8, base, fb->pitches[0],
10131 plane_config->size);
10133 plane_config->fb = intel_fb;
/*
 * Read back the ILK-style panel fitter state (PF_CTL/PF_WIN_*). On gen7
 * also sanity-check that the fitter is bound to this pipe, since pipe
 * assignments are never freed/reassigned by the driver.
 */
10140 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
10141 struct intel_crtc_state *pipe_config)
10143 struct drm_device *dev = crtc->base.dev;
10144 struct drm_i915_private *dev_priv = to_i915(dev);
10147 tmp = I915_READ(PF_CTL(crtc->pipe));
10149 if (tmp & PF_ENABLE) {
10150 pipe_config->pch_pfit.enabled = true;
10151 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
10152 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
10154 /* We currently do not free assignements of panel fitters on
10155 * ivb/hsw (since we don't use the higher upscaling modes which
10156 * differentiates them) so just WARN about this case for now. */
10157 if (IS_GEN(dev_priv, 7)) {
10158 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
10159 PF_PIPE_SEL_IVB(crtc->pipe));
/*
 * Read back the full hardware pipe state on ILK-class platforms into
 * @pipe_config: bpc, color range/space, gamma/CSC, timings, panel
 * fitter, and — when a PCH transcoder is active — FDI config, the shared
 * DPLL in use, pixel multiplier and port clock. Takes (and releases) the
 * pipe power domain; returns false if the domain or pipe is disabled.
 */
10164 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
10165 struct intel_crtc_state *pipe_config)
10167 struct drm_device *dev = crtc->base.dev;
10168 struct drm_i915_private *dev_priv = to_i915(dev);
10169 enum intel_display_power_domain power_domain;
10170 intel_wakeref_t wakeref;
10174 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10175 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10179 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10180 pipe_config->shared_dpll = NULL;
10181 pipe_config->master_transcoder = INVALID_TRANSCODER;
10184 tmp = I915_READ(PIPECONF(crtc->pipe));
10185 if (!(tmp & PIPECONF_ENABLE))
10188 switch (tmp & PIPECONF_BPC_MASK) {
10189 case PIPECONF_6BPC:
10190 pipe_config->pipe_bpp = 18;
10192 case PIPECONF_8BPC:
10193 pipe_config->pipe_bpp = 24;
10195 case PIPECONF_10BPC:
10196 pipe_config->pipe_bpp = 30;
10198 case PIPECONF_12BPC:
10199 pipe_config->pipe_bpp = 36;
10205 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10206 pipe_config->limited_color_range = true;
10208 switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10209 case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10210 case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10211 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10214 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10218 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10219 PIPECONF_GAMMA_MODE_SHIFT;
10221 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10223 i9xx_get_pipe_color_config(pipe_config);
10224 intel_color_get_config(pipe_config);
10226 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10227 struct intel_shared_dpll *pll;
10228 enum intel_dpll_id pll_id;
10230 pipe_config->has_pch_encoder = true;
10232 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10233 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10234 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10236 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10238 if (HAS_PCH_IBX(dev_priv)) {
10240 * The pipe->pch transcoder and pch transcoder->pll
10241 * mapping is fixed.
10243 pll_id = (enum intel_dpll_id) crtc->pipe;
10245 tmp = I915_READ(PCH_DPLL_SEL);
10246 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10247 pll_id = DPLL_ID_PCH_PLL_B;
10249 pll_id= DPLL_ID_PCH_PLL_A;
10252 pipe_config->shared_dpll =
10253 intel_get_shared_dpll_by_id(dev_priv, pll_id);
10254 pll = pipe_config->shared_dpll;
10256 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10257 &pipe_config->dpll_hw_state));
10259 tmp = pipe_config->dpll_hw_state.dpll;
10260 pipe_config->pixel_multiplier =
10261 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10262 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10264 ironlake_pch_clock_get(crtc, pipe_config);
10266 pipe_config->pixel_multiplier = 1;
10269 intel_get_pipe_timings(crtc, pipe_config);
10270 intel_get_pipe_src_size(crtc, pipe_config);
10272 ironlake_get_pfit_config(crtc, pipe_config);
10277 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * HSW+ clock computation: reserve a shared DPLL for the new encoder,
 * except for pre-gen11 DSI which handles its own PLL.
 */
10281 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10282 struct intel_crtc_state *crtc_state)
10284 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10285 struct intel_atomic_state *state =
10286 to_intel_atomic_state(crtc_state->uapi.state);
10288 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10289 INTEL_GEN(dev_priv) >= 11) {
10290 struct intel_encoder *encoder =
10291 intel_get_crtc_new_encoder(state, crtc_state);
10293 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10294 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10295 pipe_name(crtc->pipe));
/*
 * Read back which DPLL the BIOS routed to @port on CNL from
 * DPCLKA_CFGCR0 and store it in pipe_config->shared_dpll.
 */
10303 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
10305 struct intel_crtc_state *pipe_config)
10307 enum intel_dpll_id id;
10310 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10311 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10313 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10316 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
/*
 * ICL+ hardware readout of the PLL feeding a DDI port.  Combo PHYs are
 * decoded from ICL_DPCLKA_CFGCR0; Type-C PHYs use DDI_CLK_SEL to pick
 * between the MG PHY PLL and the TBT PLL.  The result is stored in
 * pipe_config->icl_port_dplls[] and marked active via
 * icl_set_active_port_dpll().
 */
10319 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
10321 struct intel_crtc_state *pipe_config)
10323 enum phy phy = intel_port_to_phy(dev_priv, port);
10324 enum icl_port_dpll_id port_dpll_id;
10325 enum intel_dpll_id id;
10328 if (intel_phy_is_combo(dev_priv, phy)) {
10329 temp = I915_READ(ICL_DPCLKA_CFGCR0) &
10330 ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
10331 id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
10332 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10333 } else if (intel_phy_is_tc(dev_priv, phy)) {
10334 u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
10336 if (clk_sel == DDI_CLK_SEL_MG) {
10337 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
10339 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
/* Anything else on a TC PHY is expected to be a TBT clock select. */
10341 WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
10342 id = DPLL_ID_ICL_TBTPLL;
10343 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10346 WARN(1, "Invalid port %x\n", port);
10350 pipe_config->icl_port_dplls[port_dpll_id].pll =
10351 intel_get_shared_dpll_by_id(dev_priv, id);
10353 icl_set_active_port_dpll(pipe_config, port_dpll_id);
/*
 * BXT/GLK hardware readout: map the DDI port to its fixed DPLL
 * (port A -> DPLL0, B -> DPLL1, C -> DPLL2 — TODO confirm, the switch
 * header and case labels are elided from this excerpt) and store it in
 * pipe_config->shared_dpll.
 */
10356 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10358 struct intel_crtc_state *pipe_config)
10360 enum intel_dpll_id id;
10364 id = DPLL_ID_SKL_DPLL0;
10367 id = DPLL_ID_SKL_DPLL1;
10370 id = DPLL_ID_SKL_DPLL2;
10373 DRM_ERROR("Incorrect port type\n");
10377 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
/*
 * SKL/KBL hardware readout: decode DPLL_CTRL2 to find which DPLL is
 * clocking the given DDI port.  The per-port select field is 3 bits
 * wide starting at bit (port * 3 + 1), hence the shift below.
 * Bails (WARN) if the id is outside SKL_DPLL0..SKL_DPLL3.
 */
10380 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
10382 struct intel_crtc_state *pipe_config)
10384 enum intel_dpll_id id;
10387 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10388 id = temp >> (port * 3 + 1);
10390 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
10393 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
/*
 * HSW/BDW hardware readout: translate the PORT_CLK_SEL register value
 * into the corresponding shared DPLL id (WRPLL1/2, SPLL, or one of the
 * fixed-frequency LCPLL taps) and store it in pipe_config->shared_dpll.
 * NOTE(review): break statements and the SPLL id assignment are elided
 * from this excerpt; PORT_CLK_SEL_NONE presumably returns without
 * setting a PLL — verify against the full source.
 */
10396 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
10398 struct intel_crtc_state *pipe_config)
10400 enum intel_dpll_id id;
10401 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
10403 switch (ddi_pll_sel) {
10404 case PORT_CLK_SEL_WRPLL1:
10405 id = DPLL_ID_WRPLL1;
10407 case PORT_CLK_SEL_WRPLL2:
10408 id = DPLL_ID_WRPLL2;
10410 case PORT_CLK_SEL_SPLL:
10413 case PORT_CLK_SEL_LCPLL_810:
10414 id = DPLL_ID_LCPLL_810;
10416 case PORT_CLK_SEL_LCPLL_1350:
10417 id = DPLL_ID_LCPLL_1350;
10419 case PORT_CLK_SEL_LCPLL_2700:
10420 id = DPLL_ID_LCPLL_2700;
10423 MISSING_CASE(ddi_pll_sel);
10425 case PORT_CLK_SEL_NONE:
10429 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
/*
 * Determine which CPU transcoder feeds this CRTC (pipe transcoder by
 * default, possibly eDP or a DSI transcoder on gen11+), grab the
 * matching transcoder power domain (recording the wakeref in the
 * caller-supplied arrays), and report whether the transcoder's
 * PIPECONF is enabled.
 *
 * Returns true if the transcoder is enabled.  The caller owns the
 * power references accumulated in *power_domain_mask / wakerefs[].
 */
10432 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
10433 struct intel_crtc_state *pipe_config,
10434 u64 *power_domain_mask,
10435 intel_wakeref_t *wakerefs)
10437 struct drm_device *dev = crtc->base.dev;
10438 struct drm_i915_private *dev_priv = to_i915(dev);
10439 enum intel_display_power_domain power_domain;
10440 unsigned long panel_transcoder_mask = 0;
10441 unsigned long enabled_panel_transcoders = 0;
10442 enum transcoder panel_transcoder;
10443 intel_wakeref_t wf;
/* gen11+ adds the two DSI transcoders to the candidates to scan. */
10446 if (INTEL_GEN(dev_priv) >= 11)
10447 panel_transcoder_mask |=
10448 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
10450 if (HAS_TRANSCODER_EDP(dev_priv))
10451 panel_transcoder_mask |= BIT(TRANSCODER_EDP);
10454 * The pipe->transcoder mapping is fixed with the exception of the eDP
10455 * and DSI transcoders handled below.
10457 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10460 * XXX: Do intel_display_power_get_if_enabled before reading this (for
10461 * consistency and less surprising code; it's in always on power).
10463 for_each_set_bit(panel_transcoder,
10464 &panel_transcoder_mask,
10465 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
10466 bool force_thru = false;
10467 enum pipe trans_pipe;
10469 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
10470 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
10474 * Log all enabled ones, only use the first one.
10476 * FIXME: This won't work for two separate DSI displays.
10478 enabled_panel_transcoders |= BIT(panel_transcoder);
10479 if (enabled_panel_transcoders != BIT(panel_transcoder))
/* Decode which pipe the enabled panel transcoder is driving. */
10482 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10484 WARN(1, "unknown pipe linked to transcoder %s\n",
10485 transcoder_name(panel_transcoder));
10487 case TRANS_DDI_EDP_INPUT_A_ONOFF:
10490 case TRANS_DDI_EDP_INPUT_A_ON:
10491 trans_pipe = PIPE_A;
10493 case TRANS_DDI_EDP_INPUT_B_ONOFF:
10494 trans_pipe = PIPE_B;
10496 case TRANS_DDI_EDP_INPUT_C_ONOFF:
10497 trans_pipe = PIPE_C;
10501 if (trans_pipe == crtc->pipe) {
10502 pipe_config->cpu_transcoder = panel_transcoder;
10503 pipe_config->pch_pfit.force_thru = force_thru;
10508 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
10510 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
10511 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
10513 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
/* The caller must not already hold this domain. */
10514 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10516 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10520 wakerefs[power_domain] = wf;
10521 *power_domain_mask |= BIT_ULL(power_domain);
10523 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10525 return tmp & PIPECONF_ENABLE;
/*
 * BXT DSI hardware readout: scan DSI ports A and C for an enabled DPI
 * output wired to this CRTC's pipe.  Grabs the per-transcoder power
 * domain (recorded in the caller-supplied mask/wakeref array) before
 * touching DSI registers.
 *
 * Returns true if a DSI transcoder was found driving this pipe (i.e.
 * pipe_config->cpu_transcoder was set to a DSI transcoder).
 */
10528 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10529 struct intel_crtc_state *pipe_config,
10530 u64 *power_domain_mask,
10531 intel_wakeref_t *wakerefs)
10533 struct drm_device *dev = crtc->base.dev;
10534 struct drm_i915_private *dev_priv = to_i915(dev);
10535 enum intel_display_power_domain power_domain;
10536 enum transcoder cpu_transcoder;
10537 intel_wakeref_t wf;
10541 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10542 if (port == PORT_A)
10543 cpu_transcoder = TRANSCODER_DSI_A;
10545 cpu_transcoder = TRANSCODER_DSI_C;
10547 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10548 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10550 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10554 wakerefs[power_domain] = wf;
10555 *power_domain_mask |= BIT_ULL(power_domain);
10558 * The PLL needs to be enabled with a valid divider
10559 * configuration, otherwise accessing DSI registers will hang
10560 * the machine. See BSpec North Display Engine
10561 * registers/MIPI[BXT]. We can break out here early, since we
10562 * need the same DSI PLL to be enabled for both DSI ports.
10564 if (!bxt_dsi_pll_is_enabled(dev_priv))
10567 /* XXX: this works for video mode only */
10568 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10569 if (!(tmp & DPI_ENABLE))
/* Only claim the transcoder if it is selected for our pipe. */
10572 tmp = I915_READ(MIPI_CTRL(port));
10573 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10576 pipe_config->cpu_transcoder = cpu_transcoder;
10580 return transcoder_is_dsi(pipe_config->cpu_transcoder);
/*
 * Read out the DDI port state for this CRTC: determine which port the
 * transcoder is driving, dispatch to the per-platform PLL readout, and
 * capture the FDI/PCH encoder state on HSW (PCH transcoder A / DDI E).
 */
10583 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10584 struct intel_crtc_state *pipe_config)
10586 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10587 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
10588 struct intel_shared_dpll *pll;
/* DSI transcoders map directly to a fixed port; others are decoded
 * from TRANS_DDI_FUNC_CTL (gen12 uses a different field layout). */
10592 if (transcoder_is_dsi(cpu_transcoder)) {
10593 port = (cpu_transcoder == TRANSCODER_DSI_A) ?
10596 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
10597 if (INTEL_GEN(dev_priv) >= 12)
10598 port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
10600 port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
/* Per-platform DPLL readout, newest platforms first. */
10603 if (INTEL_GEN(dev_priv) >= 11)
10604 icelake_get_ddi_pll(dev_priv, port, pipe_config);
10605 else if (IS_CANNONLAKE(dev_priv))
10606 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
10607 else if (IS_GEN9_BC(dev_priv))
10608 skylake_get_ddi_pll(dev_priv, port, pipe_config);
10609 else if (IS_GEN9_LP(dev_priv))
10610 bxt_get_ddi_pll(dev_priv, port, pipe_config);
10612 haswell_get_ddi_pll(dev_priv, port, pipe_config);
10614 pll = pipe_config->shared_dpll;
10616 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10617 &pipe_config->dpll_hw_state));
10621 * Haswell has only FDI/PCH transcoder A. It is which is connected to
10622 * DDI E. So just check whether this pipe is wired to DDI E and whether
10623 * the PCH transcoder is on.
10625 if (INTEL_GEN(dev_priv) < 9 &&
10626 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10627 pipe_config->has_pch_encoder = true;
10629 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10630 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10631 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10633 ironlake_get_fdi_m_n_config(crtc, pipe_config);
/*
 * Read TRANS_DDI_FUNC_CTL2 and return which transcoder is the port-sync
 * master for @cpu_transcoder: INVALID_TRANSCODER if port sync is not
 * enabled, TRANSCODER_EDP for master-select 0, otherwise the hardware
 * master-select value minus one.
 */
10637 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
10638 enum transcoder cpu_transcoder)
10640 u32 trans_port_sync, master_select;
10642 trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
10644 if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
10645 return INVALID_TRANSCODER;
10647 master_select = trans_port_sync &
10648 PORT_SYNC_MODE_MASTER_SELECT_MASK;
10649 if (master_select == 0)
10650 return TRANSCODER_EDP;
/* Hardware encodes masters as select-value + 1. */
10652 return master_select - 1;
/*
 * Read out the transcoder port-sync topology for this CRTC: record its
 * master transcoder (if any) and build the mask of transcoders that are
 * slaved to it.  A transcoder cannot be both a master and a slave, which
 * the final WARN_ON asserts.
 */
10655 static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
10657 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
10659 enum transcoder cpu_transcoder;
10661 crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
10662 crtc_state->cpu_transcoder);
10664 transcoders = BIT(TRANSCODER_A) |
10665 BIT(TRANSCODER_B) |
10666 BIT(TRANSCODER_C) |
/* Scan every candidate transcoder for ones slaved to us; each needs
 * its power domain enabled before its registers can be read. */
10668 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
10669 enum intel_display_power_domain power_domain;
10670 intel_wakeref_t trans_wakeref;
10672 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10673 trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
10676 if (!trans_wakeref)
10679 if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
10680 crtc_state->cpu_transcoder)
10681 crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
10683 intel_display_power_put(dev_priv, power_domain, trans_wakeref);
10686 WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
10687 crtc_state->sync_mode_slaves_mask);
/*
 * Full hardware-state readout for a HSW+ CRTC.  Acquires the pipe power
 * domain (bailing if it is off), determines the active transcoder (DDI,
 * eDP, or DSI), then reads out clocks, timings, color/gamma state, the
 * panel fitter, IPS, pixel multiplier and port-sync configuration.
 * All power references taken along the way are dropped at the end.
 *
 * Returns true if the pipe is active.
 */
10690 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10691 struct intel_crtc_state *pipe_config)
10693 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10694 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10695 enum intel_display_power_domain power_domain;
10696 u64 power_domain_mask;
10699 intel_crtc_init_scalers(crtc, pipe_config);
10701 pipe_config->master_transcoder = INVALID_TRANSCODER;
10703 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10704 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10708 wakerefs[power_domain] = wf;
10709 power_domain_mask = BIT_ULL(power_domain);
10711 pipe_config->shared_dpll = NULL;
10713 active = hsw_get_transcoder_state(crtc, pipe_config,
10714 &power_domain_mask, wakerefs);
10716 if (IS_GEN9_LP(dev_priv) &&
10717 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10718 &power_domain_mask, wakerefs)) {
/* DSI transcoders (pre-gen11) have no DDI port state / pipe timings
 * to read here. */
10726 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10727 INTEL_GEN(dev_priv) >= 11) {
10728 haswell_get_ddi_port_state(crtc, pipe_config);
10729 intel_get_pipe_timings(crtc, pipe_config);
10732 intel_get_pipe_src_size(crtc, pipe_config);
/* Output colorspace: HSW encodes it in PIPECONF, BDW+ in PIPEMISC. */
10734 if (IS_HASWELL(dev_priv)) {
10735 u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10737 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
10738 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10740 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10742 pipe_config->output_format =
10743 bdw_get_pipemisc_output_format(crtc);
10746 * Currently there is no interface defined to
10747 * check user preference between RGB/YCBCR444
10748 * or YCBCR420. So the only possible case for
10749 * YCBCR444 usage is driving YCBCR420 output
10750 * with LSPCON, when pipe is configured for
10751 * YCBCR444 output and LSPCON takes care of
10754 pipe_config->lspcon_downsampling =
10755 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
10758 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10760 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10762 if (INTEL_GEN(dev_priv) >= 9) {
10763 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10765 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10766 pipe_config->gamma_enable = true;
10768 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10769 pipe_config->csc_enable = true;
10771 i9xx_get_pipe_color_config(pipe_config);
10774 intel_color_get_config(pipe_config);
/* The panel fitter lives in its own power well; only read it out if
 * that well is already enabled. */
10776 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10777 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10779 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10781 wakerefs[power_domain] = wf;
10782 power_domain_mask |= BIT_ULL(power_domain);
10784 if (INTEL_GEN(dev_priv) >= 9)
10785 skylake_get_pfit_config(crtc, pipe_config);
10787 ironlake_get_pfit_config(crtc, pipe_config);
10790 if (hsw_crtc_supports_ips(crtc)) {
10791 if (IS_HASWELL(dev_priv))
10792 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10795 * We cannot readout IPS state on broadwell, set to
10796 * true so we can set it to a defined state on first
10799 pipe_config->ips_enabled = true;
10803 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10804 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10805 pipe_config->pixel_multiplier =
10806 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10808 pipe_config->pixel_multiplier = 1;
10811 if (INTEL_GEN(dev_priv) >= 11 &&
10812 !transcoder_is_dsi(pipe_config->cpu_transcoder))
10813 icelake_get_trans_port_sync_config(pipe_config);
/* Drop every power reference taken during readout. */
10816 for_each_power_domain(power_domain, power_domain_mask)
10817 intel_display_power_put(dev_priv,
10818 power_domain, wakerefs[power_domain])
/*
 * Compute the GGTT (or physical, on platforms that need a physical
 * cursor) base address to program into CURBASE for this plane state,
 * including the surface offset of color plane 0.
 */
10823 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10825 struct drm_i915_private *dev_priv =
10826 to_i915(plane_state->uapi.plane->dev);
10827 const struct drm_framebuffer *fb = plane_state->hw.fb;
10828 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10831 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10832 base = obj->phys_handle->busaddr;
10834 base = intel_plane_ggtt_offset(plane_state);
10836 return base + plane_state->color_plane[0].offset;
/*
 * Encode the cursor position register (CURPOS) value.  Negative x/y are
 * represented as a sign bit plus the magnitude.
 * NOTE(review): the sign-test conditionals and x/y negation appear
 * elided from this excerpt.
 */
10839 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10841 int x = plane_state->uapi.dst.x1;
10842 int y = plane_state->uapi.dst.y1;
10846 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10849 pos |= x << CURSOR_X_SHIFT;
10852 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10855 pos |= y << CURSOR_Y_SHIFT;
/*
 * Common cursor size check: the destination rectangle must be non-empty
 * and within the device's advertised maximum cursor width/height.
 */
10860 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10862 const struct drm_mode_config *config =
10863 &plane_state->uapi.plane->dev->mode_config;
10864 int width = drm_rect_width(&plane_state->uapi.dst);
10865 int height = drm_rect_height(&plane_state->uapi.dst);
10867 return width > 0 && width <= config->cursor_width &&
10868 height > 0 && height <= config->cursor_height;
/*
 * Validate/compute the cursor surface layout: pin via the GTT helper,
 * reject panning (cursors must start at surface offset 0,0), and record
 * the final color-plane offset/x/y.  On GMCH platforms with 180 degree
 * rotation the offset is advanced to the last pixel, since the hardware
 * does not rotate automatically there.
 *
 * Returns 0 on success or a negative error code.
 */
10871 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10873 struct drm_i915_private *dev_priv =
10874 to_i915(plane_state->uapi.plane->dev);
10875 unsigned int rotation = plane_state->hw.rotation;
10880 ret = intel_plane_compute_gtt(plane_state);
10884 if (!plane_state->uapi.visible)
10887 src_x = plane_state->uapi.src.x1 >> 16;
10888 src_y = plane_state->uapi.src.y1 >> 16;
10890 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10891 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10894 if (src_x != 0 || src_y != 0) {
10895 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10900 * Put the final coordinates back so that the src
10901 * coordinate checks will see the right values.
10903 drm_rect_translate_to(&plane_state->uapi.src,
10904 src_x << 16, src_y << 16);
10906 /* ILK+ do this automagically in hardware */
10907 if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
10908 const struct drm_framebuffer *fb = plane_state->hw.fb;
10909 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
10910 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
10912 offset += (src_h * src_w - 1) * fb->format->cpp[0];
10915 plane_state->color_plane[0].offset = offset;
10916 plane_state->color_plane[0].x = src_x;
10917 plane_state->color_plane[0].y = src_y;
/*
 * Common cursor atomic check: reject tiled framebuffers, run the DRM
 * plane-state check (no scaling allowed), restore the unclipped src/dst
 * rectangles (the hardware is programmed with the unclipped values),
 * then validate the surface and source coordinates.
 *
 * Returns 0 on success or a negative error code.
 */
10922 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10923 struct intel_plane_state *plane_state)
10925 const struct drm_framebuffer *fb = plane_state->hw.fb;
10928 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10929 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10933 ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
10935 DRM_PLANE_HELPER_NO_SCALING,
10936 DRM_PLANE_HELPER_NO_SCALING,
10941 /* Use the unclipped src/dst rectangles, which we program to hw */
10942 plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
10943 plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
10945 ret = intel_cursor_check_surface(plane_state);
10949 if (!plane_state->uapi.visible)
10952 ret = intel_plane_check_src_coordinates(plane_state);
/*
 * Maximum cursor stride for 845g/865g.
 * NOTE(review): the return statement is elided from this excerpt.
 */
10959 static unsigned int
10960 i845_cursor_max_stride(struct intel_plane *plane,
10961 u32 pixel_format, u64 modifier,
10962 unsigned int rotation)
/*
 * CRTC-dependent bits of the 845g/865g cursor control register:
 * only the legacy gamma enable.
 */
10967 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10971 if (crtc_state->gamma_enable)
10972 cntl |= CURSOR_GAMMA_ENABLE;
/*
 * Plane-state-dependent bits of the 845g/865g cursor control register:
 * enable, ARGB format, and the stride from color plane 0.
 */
10977 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10978 const struct intel_plane_state *plane_state)
10980 return CURSOR_ENABLE |
10981 CURSOR_FORMAT_ARGB |
10982 CURSOR_STRIDE(plane_state->color_plane[0].stride);
/*
 * 845g/865g cursor size check: the common limits plus a width that is
 * a multiple of 64 pixels.
 */
10985 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10987 int width = drm_rect_width(&plane_state->uapi.dst);
10990 * 845g/865g are only limited by the width of their cursors,
10991 * the height is arbitrary up to the precision of the register.
10993 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
/*
 * 845g/865g cursor atomic check: run the common checks, then validate
 * the dimensions and the framebuffer stride (only specific pitches are
 * programmable — the case labels are elided from this excerpt), and
 * finally precompute the control register value.
 *
 * Returns 0 on success or a negative error code.
 */
10996 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10997 struct intel_plane_state *plane_state)
10999 const struct drm_framebuffer *fb = plane_state->hw.fb;
11002 ret = intel_check_cursor(crtc_state, plane_state);
11006 /* if we want to turn off the cursor ignore width and height */
11010 /* Check for which cursor types we support */
11011 if (!i845_cursor_size_ok(plane_state)) {
11012 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
11013 drm_rect_width(&plane_state->uapi.dst),
11014 drm_rect_height(&plane_state->uapi.dst));
11018 WARN_ON(plane_state->uapi.visible &&
11019 plane_state->color_plane[0].stride != fb->pitches[0]);
11021 switch (fb->pitches[0]) {
11028 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
11033 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
/*
 * Program the 845g/865g cursor registers for @plane_state, or disable
 * the cursor when @plane_state is NULL/invisible.  Register writes are
 * done under the uncore lock with the _FW (no-forcewake-bookkeeping)
 * accessors.
 */
11038 static void i845_update_cursor(struct intel_plane *plane,
11039 const struct intel_crtc_state *crtc_state,
11040 const struct intel_plane_state *plane_state)
11042 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11043 u32 cntl = 0, base = 0, pos = 0, size = 0;
11044 unsigned long irqflags;
11046 if (plane_state && plane_state->uapi.visible) {
11047 unsigned int width = drm_rect_width(&plane_state->uapi.dst);
11048 unsigned int height = drm_rect_height(&plane_state->uapi.dst);
11050 cntl = plane_state->ctl |
11051 i845_cursor_ctl_crtc(crtc_state);
11053 size = (height << 12) | width;
11055 base = intel_cursor_base(plane_state);
11056 pos = intel_cursor_position(plane_state);
11059 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
11061 /* On these chipsets we can only modify the base/size/stride
11062 * whilst the cursor is disabled.
11064 if (plane->cursor.base != base ||
11065 plane->cursor.size != size ||
11066 plane->cursor.cntl != cntl) {
/* Disable, reprogram, re-enable; cache the programmed values. */
11067 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
11068 I915_WRITE_FW(CURBASE(PIPE_A), base);
11069 I915_WRITE_FW(CURSIZE, size);
11070 I915_WRITE_FW(CURPOS(PIPE_A), pos);
11071 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
11073 plane->cursor.base = base;
11074 plane->cursor.size = size;
11075 plane->cursor.cntl = cntl;
/* Fast path: only the position changed. */
11077 I915_WRITE_FW(CURPOS(PIPE_A), pos);
11080 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/* Disable the 845g/865g cursor (NULL plane state == all-zero regs). */
11083 static void i845_disable_cursor(struct intel_plane *plane,
11084 const struct intel_crtc_state *crtc_state)
11086 i845_update_cursor(plane, crtc_state, NULL);
/*
 * Read whether the 845g/865g cursor is enabled in hardware.  Takes the
 * pipe A power domain reference only if it is already enabled; the
 * visible excerpt elides the early-return when it is not.
 */
11089 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11092 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11093 enum intel_display_power_domain power_domain;
11094 intel_wakeref_t wakeref;
11097 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11098 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11102 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11106 intel_display_power_put(dev_priv, power_domain, wakeref);
/*
 * Maximum i9xx cursor stride: 4 bytes per pixel times the device's
 * maximum cursor width.
 */
11111 static unsigned int
11112 i9xx_cursor_max_stride(struct intel_plane *plane,
11113 u32 pixel_format, u64 modifier,
11114 unsigned int rotation)
11116 return plane->base.dev->mode_config.cursor_width * 4;
/*
 * CRTC-dependent bits of the i9xx+ cursor control register: legacy
 * gamma, pipe CSC, and (pre-gen5, non-G4X) the pipe select field.
 * gen11+ takes the early-out (body elided in this excerpt).
 * NOTE(review): "cntl = MCURSOR_GAMMA_ENABLE" uses plain assignment
 * where the sibling i845 helper uses |= — appears intentional since
 * cntl starts at 0, but worth confirming against the full source.
 */
11119 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11121 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11122 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11125 if (INTEL_GEN(dev_priv) >= 11)
11128 if (crtc_state->gamma_enable)
11129 cntl = MCURSOR_GAMMA_ENABLE;
11131 if (crtc_state->csc_enable)
11132 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11134 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11135 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
/*
 * Plane-state-dependent bits of the i9xx+ cursor control register:
 * trickle-feed disable (SNB/IVB), the ARGB mode matching the cursor
 * width (64/128/256), and 180 degree rotation.
 */
11140 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11141 const struct intel_plane_state *plane_state)
11143 struct drm_i915_private *dev_priv =
11144 to_i915(plane_state->uapi.plane->dev);
11147 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11148 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
/* Cursor mode is selected by width; check_cursor guarantees one of
 * the supported sizes (case labels elided from this excerpt). */
11150 switch (drm_rect_width(&plane_state->uapi.dst)) {
11152 cntl |= MCURSOR_MODE_64_ARGB_AX;
11155 cntl |= MCURSOR_MODE_128_ARGB_AX;
11158 cntl |= MCURSOR_MODE_256_ARGB_AX;
11161 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11165 if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11166 cntl |= MCURSOR_ROTATE_180;
/*
 * i9xx+ cursor size check: common limits, power-of-two widths (checks
 * elided from this excerpt), and the CUR_FBC_CTL relaxation that allows
 * a short, unrotated cursor (height from 8 up to the width); otherwise
 * the cursor must be square.
 */
11171 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11173 struct drm_i915_private *dev_priv =
11174 to_i915(plane_state->uapi.plane->dev);
11175 int width = drm_rect_width(&plane_state->uapi.dst);
11176 int height = drm_rect_height(&plane_state->uapi.dst);
11178 if (!intel_cursor_size_ok(plane_state))
11181 /* Cursor width is limited to a few power-of-two sizes */
11192 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11193 * height from 8 lines up to the cursor width, when the
11194 * cursor is not rotated. Everything else requires square
11197 if (HAS_CUR_FBC(dev_priv) &&
11198 plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11199 if (height < 8 || height > width)
11202 if (height != width)
/*
 * i9xx+ cursor atomic check: run the common checks, validate the size
 * and stride (stride must equal width * cpp), refuse the known-bad CHV
 * pipe C left-edge-straddling position, and precompute the control
 * register value.
 *
 * Returns 0 on success or a negative error code.
 */
11209 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
11210 struct intel_plane_state *plane_state)
11212 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11213 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11214 const struct drm_framebuffer *fb = plane_state->hw.fb;
11215 enum pipe pipe = plane->pipe;
11218 ret = intel_check_cursor(crtc_state, plane_state);
11222 /* if we want to turn off the cursor ignore width and height */
11226 /* Check for which cursor types we support */
11227 if (!i9xx_cursor_size_ok(plane_state)) {
11228 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
11229 drm_rect_width(&plane_state->uapi.dst),
11230 drm_rect_height(&plane_state->uapi.dst));
11234 WARN_ON(plane_state->uapi.visible &&
11235 plane_state->color_plane[0].stride != fb->pitches[0]);
11237 if (fb->pitches[0] !=
11238 drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
11239 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
11241 drm_rect_width(&plane_state->uapi.dst));
11246 * There's something wrong with the cursor on CHV pipe C.
11247 * If it straddles the left edge of the screen then
11248 * moving it away from the edge or disabling it often
11249 * results in a pipe underrun, and often that can lead to
11250 * dead pipe (constant underrun reported, and it scans
11251 * out just a solid color). To recover from that, the
11252 * display power well must be turned off and on again.
11253 * Refuse the put the cursor into that compromised position.
11255 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
11256 plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
11257 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
11261 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
/*
 * Program the i9xx+ cursor registers for @plane_state, or disable the
 * cursor when @plane_state is NULL/invisible.  Uses CUR_FBC_CTL for
 * non-square cursors where available.  Register writes are done under
 * the uncore lock with the _FW accessors; see the in-body comment for
 * the CURCNTR/CURPOS/CURBASE arming rules.
 */
11266 static void i9xx_update_cursor(struct intel_plane *plane,
11267 const struct intel_crtc_state *crtc_state,
11268 const struct intel_plane_state *plane_state)
11270 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11271 enum pipe pipe = plane->pipe;
11272 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
11273 unsigned long irqflags;
11275 if (plane_state && plane_state->uapi.visible) {
11276 unsigned width = drm_rect_width(&plane_state->uapi.dst);
11277 unsigned height = drm_rect_height(&plane_state->uapi.dst);
11279 cntl = plane_state->ctl |
11280 i9xx_cursor_ctl_crtc(crtc_state);
/* Non-square cursor: program the FBC height override. */
11282 if (width != height)
11283 fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
11285 base = intel_cursor_base(plane_state);
11286 pos = intel_cursor_position(plane_state);
11289 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
11292 * On some platforms writing CURCNTR first will also
11293 * cause CURPOS to be armed by the CURBASE write.
11294 * Without the CURCNTR write the CURPOS write would
11295 * arm itself. Thus we always update CURCNTR before
11298 * On other platforms CURPOS always requires the
11299 * CURBASE write to arm the update. Additonally
11300 * a write to any of the cursor register will cancel
11301 * an already armed cursor update. Thus leaving out
11302 * the CURBASE write after CURPOS could lead to a
11303 * cursor that doesn't appear to move, or even change
11304 * shape. Thus we always write CURBASE.
11306 * The other registers are armed by by the CURBASE write
11307 * except when the plane is getting enabled at which time
11308 * the CURCNTR write arms the update.
11311 if (INTEL_GEN(dev_priv) >= 9)
11312 skl_write_cursor_wm(plane, crtc_state);
11314 if (plane->cursor.base != base ||
11315 plane->cursor.size != fbc_ctl ||
11316 plane->cursor.cntl != cntl) {
11317 if (HAS_CUR_FBC(dev_priv))
11318 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
11319 I915_WRITE_FW(CURCNTR(pipe), cntl);
11320 I915_WRITE_FW(CURPOS(pipe), pos);
11321 I915_WRITE_FW(CURBASE(pipe), base);
11323 plane->cursor.base = base;
11324 plane->cursor.size = fbc_ctl;
11325 plane->cursor.cntl = cntl;
/* Fast path: only the position changed; CURBASE arms the update. */
11327 I915_WRITE_FW(CURPOS(pipe), pos);
11328 I915_WRITE_FW(CURBASE(pipe), base);
11331 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/* Disable the i9xx+ cursor (NULL plane state == all-zero regs). */
11334 static void i9xx_disable_cursor(struct intel_plane *plane,
11335 const struct intel_crtc_state *crtc_state)
11337 i9xx_update_cursor(plane, crtc_state, NULL);
/*
 * Read whether the i9xx+ cursor is enabled in hardware, and which pipe
 * it is assigned to (from the pipe-select field on gen2-4 non-G4X,
 * otherwise fixed to the plane's own pipe).  Only proceeds if the pipe
 * power domain is already enabled.
 */
11340 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11343 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11344 enum intel_display_power_domain power_domain;
11345 intel_wakeref_t wakeref;
11350 * Not 100% correct for planes that can move between pipes,
11351 * but that's only the case for gen2-3 which don't have any
11352 * display power wells.
11354 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11355 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11359 val = I915_READ(CURCNTR(plane->pipe));
11361 ret = val & MCURSOR_MODE;
11363 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11364 *pipe = plane->pipe;
11366 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11367 MCURSOR_PIPE_SELECT_SHIFT;
11369 intel_display_power_put(dev_priv, power_domain, wakeref);
11374 /* VESA 640x480x72Hz mode to set on the pipe */
/* Fixed, universally-supported mode used by load-detect below. */
11375 static const struct drm_display_mode load_detect_mode = {
11376 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
11377 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
/*
 * Allocate and initialize an intel_framebuffer wrapping @obj with the
 * layout described by @mode_cmd.
 *
 * Returns the embedded drm_framebuffer on success, or an ERR_PTR
 * (-ENOMEM on allocation failure, or the intel_framebuffer_init error;
 * the error-path kfree is elided from this excerpt).
 */
11380 struct drm_framebuffer *
11381 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11382 struct drm_mode_fb_cmd2 *mode_cmd)
11384 struct intel_framebuffer *intel_fb;
11387 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11389 return ERR_PTR(-ENOMEM);
11391 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11395 return &intel_fb->base;
11399 return ERR_PTR(ret);
/*
 * Add every plane on @crtc to @state and detach it (no CRTC, no FB),
 * so a subsequent commit turns all planes off.
 *
 * Returns 0 on success or a negative error code.
 */
11402 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11403 struct drm_crtc *crtc)
11405 struct drm_plane *plane;
11406 struct drm_plane_state *plane_state;
11409 ret = drm_atomic_add_affected_planes(state, crtc);
11413 for_each_new_plane_in_state(state, plane, plane_state, i) {
/* Only touch planes currently assigned to this CRTC. */
11414 if (plane_state->crtc != crtc)
11417 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11421 drm_atomic_set_fb_for_plane(plane_state, NULL);
/*
 * Set up a temporary modeset ("load detect") on @connector so analog
 * connection detection can run: find a CRTC (the connector's current
 * one, or the first unused one the encoder can drive), build an atomic
 * state that lights it up with the fixed 640x480 load_detect_mode and
 * all planes disabled, commit it, and save a duplicated state in
 * @old->restore_state for intel_release_load_detect_pipe() to restore.
 *
 * Returns a positive value when a pipe was set up, 0 when none was
 * needed/available, or a negative error code (notably -EDEADLK for
 * modeset-lock backoff, which the caller must handle).
 */
11427 int intel_get_load_detect_pipe(struct drm_connector *connector,
11428 struct intel_load_detect_pipe *old,
11429 struct drm_modeset_acquire_ctx *ctx)
11431 struct intel_crtc *intel_crtc;
11432 struct intel_encoder *intel_encoder =
11433 intel_attached_encoder(connector);
11434 struct drm_crtc *possible_crtc;
11435 struct drm_encoder *encoder = &intel_encoder->base;
11436 struct drm_crtc *crtc = NULL;
11437 struct drm_device *dev = encoder->dev;
11438 struct drm_i915_private *dev_priv = to_i915(dev);
11439 struct drm_mode_config *config = &dev->mode_config;
11440 struct drm_atomic_state *state = NULL, *restore_state = NULL;
11441 struct drm_connector_state *connector_state;
11442 struct intel_crtc_state *crtc_state;
11445 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11446 connector->base.id, connector->name,
11447 encoder->base.id, encoder->name);
11449 old->restore_state = NULL;
11451 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
11454 * Algorithm gets a little messy:
11456 * - if the connector already has an assigned crtc, use it (but make
11457 * sure it's on first)
11459 * - try to find the first unused crtc that can drive this connector,
11460 * and use that if we find one
11463 /* See if we already have a CRTC for this connector */
11464 if (connector->state->crtc) {
11465 crtc = connector->state->crtc;
11467 ret = drm_modeset_lock(&crtc->mutex, ctx);
11471 /* Make sure the crtc and connector are running */
11475 /* Find an unused one (if possible) */
11476 for_each_crtc(dev, possible_crtc) {
11478 if (!(encoder->possible_crtcs & (1 << i)))
11481 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
/* Skip CRTCs that are already enabled; drop their lock. */
11485 if (possible_crtc->state->enable) {
11486 drm_modeset_unlock(&possible_crtc->mutex);
11490 crtc = possible_crtc;
11495 * If we didn't find an unused CRTC, don't use any.
11498 DRM_DEBUG_KMS("no pipe available for load-detect\n");
11504 intel_crtc = to_intel_crtc(crtc);
11506 state = drm_atomic_state_alloc(dev);
11507 restore_state = drm_atomic_state_alloc(dev);
11508 if (!state || !restore_state) {
11513 state->acquire_ctx = ctx;
11514 restore_state->acquire_ctx = ctx;
/* Build the new state: connector on this CRTC, CRTC active with the
 * load-detect mode, all planes disabled. */
11516 connector_state = drm_atomic_get_connector_state(state, connector);
11517 if (IS_ERR(connector_state)) {
11518 ret = PTR_ERR(connector_state);
11522 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
11526 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
11527 if (IS_ERR(crtc_state)) {
11528 ret = PTR_ERR(crtc_state);
11532 crtc_state->uapi.active = true;
11534 ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
11535 &load_detect_mode);
11539 ret = intel_modeset_disable_planes(state, crtc);
/* Duplicate the current state so it can be restored afterwards. */
11543 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
11545 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
11547 ret = drm_atomic_add_affected_planes(restore_state, crtc);
11549 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
11553 ret = drm_atomic_commit(state);
11555 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
11559 old->restore_state = restore_state;
11560 drm_atomic_state_put(state);
11562 /* let the connector get through one full cycle before testing */
11563 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
/* Error path: release both atomic states. */
11568 drm_atomic_state_put(state);
11571 if (restore_state) {
11572 drm_atomic_state_put(restore_state);
11573 restore_state = NULL;
11576 if (ret == -EDEADLK)
/*
 * Release a pipe previously borrowed for load detection: recommit the
 * display state that was saved in old->restore_state and drop our
 * reference to it.  NOTE(review): this excerpt is elided; some lines of
 * the original body are not visible here.
 */
11582 void intel_release_load_detect_pipe(struct drm_connector *connector,
11583 struct intel_load_detect_pipe *old,
11584 struct drm_modeset_acquire_ctx *ctx)
11586 struct intel_encoder *intel_encoder =
11587 intel_attached_encoder(connector);
11588 struct drm_encoder *encoder = &intel_encoder->base;
/* State captured when the pipe was borrowed; restored below. */
11589 struct drm_atomic_state *state = old->restore_state;
11592 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11593 connector->base.id, connector->name,
11594 encoder->base.id, encoder->name);
/* Recommit the duplicated pre-load-detect state under ctx. */
11599 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11601 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11602 drm_atomic_state_put(state);
/*
 * Return the reference clock frequency feeding the DPLL, derived from
 * the programmed DPLL value: the VBT SSC frequency when spread-spectrum
 * input is selected, otherwise a platform-dependent fixed value.
 * NOTE(review): the return values of the PCH-split and !gen2 branches
 * are elided in this excerpt.
 */
11605 static int i9xx_pll_refclk(struct drm_device *dev,
11606 const struct intel_crtc_state *pipe_config)
11608 struct drm_i915_private *dev_priv = to_i915(dev);
11609 u32 dpll = pipe_config->dpll_hw_state.dpll;
11611 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11612 return dev_priv->vbt.lvds_ssc_freq;
11613 else if (HAS_PCH_SPLIT(dev_priv))
11615 else if (!IS_GEN(dev_priv, 2))
11621 /* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Reverse-engineer the port clock from the DPLL/FP register values
 * captured in pipe_config->dpll_hw_state.  Dividers (m1/m2/n/p1/p2) are
 * decoded per-platform (Pineview vs. gen2 vs. later) and then fed to
 * the matching calc_dpll_params() helper.  NOTE(review): excerpt is
 * elided; some else-arms and case bodies are not visible here.
 */
11622 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
11623 struct intel_crtc_state *pipe_config)
11625 struct drm_device *dev = crtc->base.dev;
11626 struct drm_i915_private *dev_priv = to_i915(dev);
11627 enum pipe pipe = crtc->pipe;
11628 u32 dpll = pipe_config->dpll_hw_state.dpll;
11632 int refclk = i9xx_pll_refclk(dev, pipe_config);
/* Select FP0 or FP1 depending on the programmed rate select bit. */
11634 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
11635 fp = pipe_config->dpll_hw_state.fp0;
11637 fp = pipe_config->dpll_hw_state.fp1;
11639 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
11640 if (IS_PINEVIEW(dev_priv)) {
11641 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
11642 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
11644 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
11645 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
/* Gen3+: p1 is encoded as a one-hot bitfield, hence the ffs(). */
11648 if (!IS_GEN(dev_priv, 2)) {
11649 if (IS_PINEVIEW(dev_priv))
11650 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
11651 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
11653 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
11654 DPLL_FPA01_P1_POST_DIV_SHIFT);
11656 switch (dpll & DPLL_MODE_MASK) {
11657 case DPLLB_MODE_DAC_SERIAL:
11658 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
11661 case DPLLB_MODE_LVDS:
11662 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
11666 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
11667 "mode\n", (int)(dpll & DPLL_MODE_MASK));
11671 if (IS_PINEVIEW(dev_priv))
11672 port_clock = pnv_calc_dpll_params(refclk, &clock);
11674 port_clock = i9xx_calc_dpll_params(refclk, &clock);
/* Gen2 path: decode LVDS-specific divider layout. */
11676 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
11677 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
11680 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
11681 DPLL_FPA01_P1_POST_DIV_SHIFT);
11683 if (lvds & LVDS_CLKB_POWER_UP)
11688 if (dpll & PLL_P1_DIVIDE_BY_TWO)
11691 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
11692 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
11694 if (dpll & PLL_P2_DIVIDE_BY_4)
11700 port_clock = i9xx_calc_dpll_params(refclk, &clock);
11704 * This value includes pixel_multiplier. We will use
11705 * port_clock to compute adjusted_mode.crtc_clock in the
11706 * encoder's get_config() function.
11708 pipe_config->port_clock = port_clock;
/*
 * Derive the pixel (dot) clock from the link clock and the M/N values,
 * keeping the multiplication in 64 bits to avoid precision loss.
 * NOTE(review): a zero-link_n guard, if any, is elided in this excerpt.
 */
11711 int intel_dotclock_calculate(int link_freq,
11712 const struct intel_link_m_n *m_n)
11715 * The calculation for the data clock is:
11716 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11717 * But we want to avoid losing precison if possible, so:
11718 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11720 * and the link clock is simpler:
11721 * link_clock = (m * link_clock) / n
/* 64-bit multiply then divide to avoid 32-bit overflow. */
11727 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
/*
 * Read back the port clock from the DPLL, then compute a dotclock from
 * the FDI M/N configuration so that an active pipe without active ports
 * still gets a usable crtc_clock.
 */
11730 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11731 struct intel_crtc_state *pipe_config)
11733 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11735 /* read out port_clock from the DPLL */
11736 i9xx_crtc_clock_get(crtc, pipe_config);
11739 * In case there is an active pipe without active ports,
11740 * we may need some idea for the dotclock anyway.
11741 * Calculate one based on the FDI configuration.
11743 pipe_config->hw.adjusted_mode.crtc_clock =
11744 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11745 &pipe_config->fdi_m_n);
11748 /* Returns the currently programmed mode of the given encoder. */
/*
 * Allocate a drm_display_mode, read the hardware state of the pipe the
 * encoder is on via get_pipe_config()/get_config(), and convert it into
 * the returned mode.  Caller owns (and must kfree) the returned mode.
 * NOTE(review): error/cleanup paths are elided in this excerpt.
 */
11749 struct drm_display_mode *
11750 intel_encoder_current_mode(struct intel_encoder *encoder)
11752 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11753 struct intel_crtc_state *crtc_state;
11754 struct drm_display_mode *mode;
11755 struct intel_crtc *crtc;
/* Bail out if the encoder is not actually enabled on any pipe. */
11758 if (!encoder->get_hw_state(encoder, &pipe))
11761 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11763 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11767 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11773 crtc_state->uapi.crtc = &crtc->base;
11775 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11781 encoder->get_config(encoder, crtc_state);
11783 intel_mode_from_pipe_config(mode, crtc_state);
/* drm_crtc_funcs.destroy callback: clean up the DRM core CRTC state. */
11790 static void intel_crtc_destroy(struct drm_crtc *crtc)
11792 struct intel_crtc *intel_crtc = to_intel_crtc(crtc)
11794 drm_crtc_cleanup(crtc);
11799 * intel_wm_need_update - Check whether watermarks need updating
11800 * @cur: current plane state
11801 * @new: new plane state
11803 * Check current plane state versus the new one to determine whether
11804 * watermarks need to be recalculated.
11806 * Returns true or false.
11808 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11809 struct intel_plane_state *new)
11811 /* Update watermarks on tiling or size changes. */
11812 if (new->uapi.visible != cur->uapi.visible)
/* Gaining or losing a framebuffer also forces a recalculation. */
11815 if (!cur->hw.fb || !new->hw.fb)
/* Modifier, rotation or any src/dst dimension change. */
11818 if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11819 cur->hw.rotation != new->hw.rotation ||
11820 drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11821 drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11822 drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11823 drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
/*
 * True when the plane scales: source size (16.16 fixed point, hence the
 * >> 16) differs from destination size in either dimension.
 */
11829 static bool needs_scaling(const struct intel_plane_state *state)
11831 int src_w = drm_rect_width(&state->uapi.src) >> 16;
11832 int src_h = drm_rect_height(&state->uapi.src) >> 16;
11833 int dst_w = drm_rect_width(&state->uapi.dst);
11834 int dst_h = drm_rect_height(&state->uapi.dst);
11836 return (src_w != dst_w || src_h != dst_h);
/*
 * Work out the per-plane consequences of an atomic update for the CRTC
 * state: visibility transitions (turn on/off), watermark recalculation
 * flags (update_wm_pre/post), cxsr disabling around plane toggles, and
 * the LP watermark workaround for sprite enable/scaling.
 * NOTE(review): excerpt is elided; some statements are not visible here.
 */
11839 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11840 struct intel_crtc_state *crtc_state,
11841 const struct intel_plane_state *old_plane_state,
11842 struct intel_plane_state *plane_state)
11844 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11845 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11846 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11847 bool mode_changed = needs_modeset(crtc_state);
11848 bool was_crtc_enabled = old_crtc_state->hw.active;
11849 bool is_crtc_enabled = crtc_state->hw.active;
11850 bool turn_off, turn_on, visible, was_visible;
/* Gen9+: non-cursor planes may need a scaler set up first. */
11853 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11854 ret = skl_update_scaler_plane(crtc_state, plane_state);
11859 was_visible = old_plane_state->uapi.visible;
11860 visible = plane_state->uapi.visible;
11862 if (!was_crtc_enabled && WARN_ON(was_visible))
11863 was_visible = false;
11866 * Visibility is calculated as if the crtc was on, but
11867 * after scaler setup everything depends on it being off
11868 * when the crtc isn't active.
11870 * FIXME this is wrong for watermarks. Watermarks should also
11871 * be computed as if the pipe would be active. Perhaps move
11872 * per-plane wm computation to the .check_plane() hook, and
11873 * only combine the results from all planes in the current place?
11875 if (!is_crtc_enabled) {
11876 plane_state->uapi.visible = visible = false;
11877 crtc_state->active_planes &= ~BIT(plane->id);
11878 crtc_state->data_rate[plane->id] = 0;
11879 crtc_state->min_cdclk[plane->id] = 0;
/* Nothing to do when the plane stays invisible. */
11882 if (!was_visible && !visible)
11885 turn_off = was_visible && (!visible || mode_changed);
11886 turn_on = visible && (!was_visible || mode_changed);
11888 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11889 crtc->base.base.id, crtc->base.name,
11890 plane->base.base.id, plane->base.name,
11891 was_visible, visible,
11892 turn_off, turn_on, mode_changed);
11895 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11896 crtc_state->update_wm_pre = true;
11898 /* must disable cxsr around plane enable/disable */
11899 if (plane->id != PLANE_CURSOR)
11900 crtc_state->disable_cxsr = true;
11901 } else if (turn_off) {
11902 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11903 crtc_state->update_wm_post = true;
11905 /* must disable cxsr around plane enable/disable */
11906 if (plane->id != PLANE_CURSOR)
11907 crtc_state->disable_cxsr = true;
11908 } else if (intel_wm_need_update(old_plane_state, plane_state)) {
11909 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11910 /* FIXME bollocks */
11911 crtc_state->update_wm_pre = true;
11912 crtc_state->update_wm_post = true;
11916 if (visible || was_visible)
11917 crtc_state->fb_bits |= plane->frontbuffer_bit;
11920 * ILK/SNB DVSACNTR/Sprite Enable
11921 * IVB SPR_CTL/Sprite Enable
11922 * "When in Self Refresh Big FIFO mode, a write to enable the
11923 * plane will be internally buffered and delayed while Big FIFO
11924 * mode is exiting."
11926 * Which means that enabling the sprite can take an extra frame
11927 * when we start in big FIFO mode (LP1+). Thus we need to drop
11928 * down to LP0 and wait for vblank in order to make sure the
11929 * sprite gets enabled on the next vblank after the register write.
11930 * Doing otherwise would risk enabling the sprite one frame after
11931 * we've already signalled flip completion. We can resume LP1+
11932 * once the sprite has been enabled.
11935 * WaCxSRDisabledForSpriteScaling:ivb
11936 * IVB SPR_SCALE/Scaling Enable
11937 * "Low Power watermarks must be disabled for at least one
11938 * frame before enabling sprite scaling, and kept disabled
11939 * until sprite scaling is disabled."
11941 * ILK/SNB DVSASCALE/Scaling Enable
11942 * "When in Self Refresh Big FIFO mode, scaling enable will be
11943 * masked off while Big FIFO mode is exiting."
11945 * Despite the w/a only being listed for IVB we assume that
11946 * the ILK/SNB note has similar ramifications, hence we apply
11947 * the w/a on all three platforms.
11949 * With experimental results seems this is needed also for primary
11950 * plane, not only sprite plane.
11952 if (plane->id != PLANE_CURSOR &&
11953 (IS_GEN_RANGE(dev_priv, 5, 6) ||
11954 IS_IVYBRIDGE(dev_priv)) &&
11955 (turn_on || (!needs_scaling(old_plane_state) &&
11956 needs_scaling(plane_state))))
11957 crtc_state->disable_lp_wm = true;
/*
 * Two encoders can share a pipe if each one lists the other's type in
 * its cloneable bitmask (or they are the same encoder).
 */
11962 static bool encoders_cloneable(const struct intel_encoder *a,
11963 const struct intel_encoder *b)
11965 /* masks could be asymmetric, so check both ways */
11966 return a == b || (a->cloneable & (1 << b->type) &&
11967 b->cloneable & (1 << a->type));
/*
 * Verify that @encoder is cloneable with every other encoder being
 * assigned to @crtc in the new atomic state.
 * NOTE(review): excerpt is elided; the return statements are not
 * visible here.
 */
11970 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11971 struct intel_crtc *crtc,
11972 struct intel_encoder *encoder)
11974 struct intel_encoder *source_encoder;
11975 struct drm_connector *connector;
11976 struct drm_connector_state *connector_state;
11979 for_each_new_connector_in_state(state, connector, connector_state, i) {
/* Only consider connectors being routed to this CRTC. */
11980 if (connector_state->crtc != &crtc->base)
11984 to_intel_encoder(connector_state->best_encoder);
11985 if (!encoders_cloneable(encoder, source_encoder))
/*
 * Pull the planar-YUV linked (Y/UV companion) plane of every updated
 * plane into the atomic state, so both halves of a planar pair are
 * always committed together.  Returns 0 or a negative error code.
 */
11992 static int icl_add_linked_planes(struct intel_atomic_state *state)
11994 struct intel_plane *plane, *linked;
11995 struct intel_plane_state *plane_state, *linked_plane_state;
11998 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11999 linked = plane_state->planar_linked_plane;
12004 linked_plane_state = intel_atomic_get_plane_state(state, linked);
12005 if (IS_ERR(linked_plane_state))
12006 return PTR_ERR(linked_plane_state);
/* Sanity: links must be mutual and master/slave roles must differ. */
12008 WARN_ON(linked_plane_state->planar_linked_plane != plane);
12009 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
/*
 * Gen11+ planar (NV12-class) support: tear down stale Y/UV plane links,
 * then pair every plane carrying planar content with a free Y-capable
 * plane and copy the relevant hw parameters to the slave.
 * NOTE(review): excerpt is elided; some statements are not visible here.
 */
12015 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
12017 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12018 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12019 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
12020 struct intel_plane *plane, *linked;
12021 struct intel_plane_state *plane_state;
/* Linked planes only exist on gen11+. */
12024 if (INTEL_GEN(dev_priv) < 11)
12028 * Destroy all old plane links and make the slave plane invisible
12029 * in the crtc_state->active_planes mask.
12031 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12032 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
12035 plane_state->planar_linked_plane = NULL;
12036 if (plane_state->planar_slave && !plane_state->uapi.visible) {
12037 crtc_state->active_planes &= ~BIT(plane->id);
12038 crtc_state->update_planes |= BIT(plane->id);
12041 plane_state->planar_slave = false;
/* Done if no plane in this state needs planar YUV handling. */
12044 if (!crtc_state->nv12_planes)
12047 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12048 struct intel_plane_state *linked_state = NULL;
12050 if (plane->pipe != crtc->pipe ||
12051 !(crtc_state->nv12_planes & BIT(plane->id)))
/* Find an unused Y-capable plane on this CRTC to act as slave. */
12054 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
12055 if (!icl_is_nv12_y_plane(linked->id))
12058 if (crtc_state->active_planes & BIT(linked->id))
12061 linked_state = intel_atomic_get_plane_state(state, linked);
12062 if (IS_ERR(linked_state))
12063 return PTR_ERR(linked_state);
12068 if (!linked_state) {
12069 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
12070 hweight8(crtc_state->nv12_planes));
12075 plane_state->planar_linked_plane = linked;
12077 linked_state->planar_slave = true;
12078 linked_state->planar_linked_plane = plane;
12079 crtc_state->active_planes |= BIT(linked->id);
12080 crtc_state->update_planes |= BIT(linked->id);
12081 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
12083 /* Copy parameters to slave plane */
12084 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
12085 linked_state->color_ctl = plane_state->color_ctl;
12086 linked_state->color_plane[0] = plane_state->color_plane[0];
12088 intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
12089 linked_state->uapi.src = plane_state->uapi.src;
12090 linked_state->uapi.dst = plane_state->uapi.dst;
/* HDR planes: tell the chroma-upsampler which Y plane to use. */
12092 if (icl_is_hdr_plane(dev_priv, plane->id)) {
12093 if (linked->id == PLANE_SPRITE5)
12094 plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
12095 else if (linked->id == PLANE_SPRITE4)
12096 plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
12098 MISSING_CASE(linked->id);
/*
 * True when the set of C8-format planes goes from empty to non-empty or
 * vice versa between old and new state (the !x != !y idiom compares
 * boolean-ness of the two masks).
 */
12105 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12107 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12108 struct intel_atomic_state *state =
12109 to_intel_atomic_state(new_crtc_state->uapi.state);
12110 const struct intel_crtc_state *old_crtc_state =
12111 intel_atomic_get_old_crtc_state(state, crtc);
12113 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
/*
 * Gen11+ tiled-display genlock setup: if @crtc_state drives a tile that
 * is not the last-h/last-v tile of its tile group, find the connector
 * driving that last tile, resolve its CRTC as the master, and record
 * the master transcoder / slave transcoder mask on both states.
 * NOTE(review): excerpt is elided; some statements are not visible here.
 */
12116 static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
12118 struct drm_crtc *crtc = crtc_state->uapi.crtc;
12119 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
12120 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12121 struct drm_connector *master_connector, *connector;
12122 struct drm_connector_state *connector_state;
12123 struct drm_connector_list_iter conn_iter;
12124 struct drm_crtc *master_crtc = NULL;
12125 struct drm_crtc_state *master_crtc_state;
12126 struct intel_crtc_state *master_pipe_config;
12127 int i, tile_group_id;
12129 if (INTEL_GEN(dev_priv) < 11)
12133 * In case of tiled displays there could be one or more slaves but there is
12134 * only one master. Lets make the CRTC used by the connector corresponding
12135 * to the last horizonal and last vertical tile a master/genlock CRTC.
12136 * All the other CRTCs corresponding to other tiles of the same Tile group
12137 * are the slave CRTCs and hold a pointer to their genlock CRTC.
12139 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
12140 if (connector_state->crtc != crtc)
12142 if (!connector->has_tile)
/* Mode must cover exactly one tile for genlock to apply. */
12144 if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
12145 crtc_state->hw.mode.vdisplay != connector->tile_v_size)
/* The last-h/last-v tile is itself the master; nothing to link. */
12147 if (connector->tile_h_loc == connector->num_h_tile - 1 &&
12148 connector->tile_v_loc == connector->num_v_tile - 1)
12150 crtc_state->sync_mode_slaves_mask = 0;
12151 tile_group_id = connector->tile_group->id;
12152 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
12153 drm_for_each_connector_iter(master_connector, &conn_iter) {
12154 struct drm_connector_state *master_conn_state = NULL;
12156 if (!master_connector->has_tile)
12158 if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
12159 master_connector->tile_v_loc != master_connector->num_v_tile - 1)
12161 if (master_connector->tile_group->id != tile_group_id)
12164 master_conn_state = drm_atomic_get_connector_state(&state->base,
12166 if (IS_ERR(master_conn_state)) {
12167 drm_connector_list_iter_end(&conn_iter);
12168 return PTR_ERR(master_conn_state);
12170 if (master_conn_state->crtc) {
12171 master_crtc = master_conn_state->crtc;
12175 drm_connector_list_iter_end(&conn_iter);
12177 if (!master_crtc) {
12178 DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
12179 connector_state->crtc->base.id);
12183 master_crtc_state = drm_atomic_get_crtc_state(&state->base,
12185 if (IS_ERR(master_crtc_state))
12186 return PTR_ERR(master_crtc_state);
12188 master_pipe_config = to_intel_crtc_state(master_crtc_state);
12189 crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
12190 master_pipe_config->sync_mode_slaves_mask |=
12191 BIT(crtc_state->cpu_transcoder);
12192 DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
12193 transcoder_name(crtc_state->master_transcoder),
12194 crtc_state->uapi.crtc->base.id,
12195 master_pipe_config->sync_mode_slaves_mask);
/*
 * Per-CRTC atomic check: compute clocks on modeset, re-run color
 * management when needed, compute target and intermediate watermarks,
 * set up gen9+ scalers, and evaluate HSW IPS eligibility.
 * NOTE(review): excerpt is elided; some error-return statements are not
 * visible here.
 */
12201 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
12202 struct intel_crtc *crtc)
12204 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12205 struct intel_crtc_state *crtc_state =
12206 intel_atomic_get_new_crtc_state(state, crtc);
12207 bool mode_changed = needs_modeset(crtc_state);
12210 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
12211 mode_changed && !crtc_state->hw.active)
12212 crtc_state->update_wm_post = true;
/* On a full modeset the DPLL must not already be assigned. */
12214 if (mode_changed && crtc_state->hw.enable &&
12215 dev_priv->display.crtc_compute_clock &&
12216 !WARN_ON(crtc_state->shared_dpll)) {
12217 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
12223 * May need to update pipe gamma enable bits
12224 * when C8 planes are getting enabled/disabled.
12226 if (c8_planes_changed(crtc_state))
12227 crtc_state->uapi.color_mgmt_changed = true;
12229 if (mode_changed || crtc_state->update_pipe ||
12230 crtc_state->uapi.color_mgmt_changed) {
12231 ret = intel_color_check(crtc_state);
12237 if (dev_priv->display.compute_pipe_wm) {
12238 ret = dev_priv->display.compute_pipe_wm(crtc_state);
12240 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
/* Intermediate watermarks require the pipe-wm hook to exist. */
12245 if (dev_priv->display.compute_intermediate_wm) {
12246 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
12250 * Calculate 'intermediate' watermarks that satisfy both the
12251 * old state and the new state. We can program these
12254 ret = dev_priv->display.compute_intermediate_wm(crtc_state);
12256 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
12261 if (INTEL_GEN(dev_priv) >= 9) {
12262 if (mode_changed || crtc_state->update_pipe)
12263 ret = skl_update_scaler_crtc(crtc_state);
12265 ret = intel_atomic_setup_scalers(dev_priv, crtc,
12269 if (HAS_IPS(dev_priv))
12270 crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
/*
 * Resynchronize each connector's atomic state (best_encoder, crtc and
 * the corresponding reference count) with the current legacy
 * connector->encoder linkage, e.g. after hardware-state readout.
 */
12275 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12277 struct intel_connector *connector;
12278 struct drm_connector_list_iter conn_iter;
12280 drm_connector_list_iter_begin(dev, &conn_iter);
12281 for_each_intel_connector_iter(connector, &conn_iter) {
/* Drop the reference held for the previous crtc binding. */
12282 if (connector->base.state->crtc)
12283 drm_connector_put(&connector->base);
12285 if (connector->base.encoder) {
12286 connector->base.state->best_encoder =
12287 connector->base.encoder;
12288 connector->base.state->crtc =
12289 connector->base.encoder->crtc;
/* Take a reference for the new crtc binding. */
12291 drm_connector_get(&connector->base);
12293 connector->base.state->best_encoder = NULL;
12294 connector->base.state->crtc = NULL;
12297 drm_connector_list_iter_end(&conn_iter);
/*
 * Clamp pipe_config->pipe_bpp to what this sink supports, based on the
 * connector's max_bpc property and EDID-reported bpc.
 * NOTE(review): excerpt is elided; the switch cases computing bpp are
 * not visible here.
 */
12301 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12302 struct intel_crtc_state *pipe_config)
12304 struct drm_connector *connector = conn_state->connector;
12305 const struct drm_display_info *info = &connector->display_info;
12308 switch (conn_state->max_bpc) {
/* Only ever lower the pipe bpp, never raise it. */
12325 if (bpp < pipe_config->pipe_bpp) {
12326 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12327 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12328 connector->base.id, connector->name,
12329 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
12330 pipe_config->pipe_bpp);
12332 pipe_config->pipe_bpp = bpp;
/*
 * Pick a platform-dependent baseline pipe bpp, then clamp it against
 * every connector assigned to this CRTC via compute_sink_pipe_bpp().
 * NOTE(review): excerpt is elided; the per-platform bpp values are not
 * visible here.
 */
12339 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12340 struct intel_crtc_state *pipe_config)
12342 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12343 struct drm_atomic_state *state = pipe_config->uapi.state;
12344 struct drm_connector *connector;
12345 struct drm_connector_state *connector_state;
12348 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12349 IS_CHERRYVIEW(dev_priv)))
12351 else if (INTEL_GEN(dev_priv) >= 5)
12356 pipe_config->pipe_bpp = bpp;
12358 /* Clamp display bpp to connector max bpp */
12359 for_each_new_connector_in_state(state, connector, connector_state, i) {
12362 if (connector_state->crtc != &crtc->base)
12365 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
/* Debug helper: log the crtc_* timing fields of a mode in one line. */
12373 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12375 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12376 "type: 0x%x flags: 0x%x\n",
12378 mode->crtc_hdisplay, mode->crtc_hsync_start,
12379 mode->crtc_hsync_end, mode->crtc_htotal,
12380 mode->crtc_vdisplay, mode->crtc_vsync_start,
12381 mode->crtc_vsync_end, mode->crtc_vtotal,
12382 mode->type, mode->flags);
/* Debug helper: log one link M/N configuration, tagged with @id. */
12386 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
12387 const char *id, unsigned int lane_count,
12388 const struct intel_link_m_n *m_n)
12390 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12392 m_n->gmch_m, m_n->gmch_n,
12393 m_n->link_m, m_n->link_n, m_n->tu);
/*
 * Debug helper: pretty-print an HDMI infoframe, but only when KMS
 * debugging output is enabled (avoids the formatting cost otherwise).
 */
12397 intel_dump_infoframe(struct drm_i915_private *dev_priv,
12398 const union hdmi_infoframe *frame)
12400 if ((drm_debug & DRM_UT_KMS) == 0)
12403 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
/*
 * Map INTEL_OUTPUT_* enum values to their names for debug output; the
 * macro stringifies the suffix at the matching array index.
 * NOTE(review): excerpt is elided; some table entries are not visible.
 */
12406 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
12408 static const char * const output_type_str[] = {
12409 OUTPUT_TYPE(UNUSED),
12410 OUTPUT_TYPE(ANALOG),
12414 OUTPUT_TYPE(TVOUT),
12420 OUTPUT_TYPE(DP_MST),
/*
 * Render an output_types bitmask as a comma-separated list of names
 * into @buf (at most @len bytes); warns if unknown bits remain.
 */
12425 static void snprintf_output_types(char *buf, size_t len,
12426 unsigned int output_types)
12433 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12436 if ((output_types & BIT(i)) == 0)
/* Prepend a comma for every entry after the first. */
12439 r = snprintf(str, len, "%s%s",
12440 str != buf ? "," : "", output_type_str[i]);
12446 output_types &= ~BIT(i);
/* Any bit left over has no name in output_type_str. */
12449 WARN_ON_ONCE(output_types != 0);
/* Human-readable names for enum intel_output_format, for debug output. */
12452 static const char * const output_format_str[] = {
12453 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
12454 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
12455 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
12456 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
/* Return the name of @format; out-of-range values map to "Invalid". */
12459 static const char *output_formats(enum intel_output_format format)
12461 if (format >= ARRAY_SIZE(output_format_str))
12462 format = INTEL_OUTPUT_FORMAT_INVALID;
12463 return output_format_str[format];
/*
 * Debug helper: log one plane's framebuffer, format, visibility,
 * rotation, scaler assignment and (when visible) src/dst rectangles.
 */
12466 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
12468 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
12469 const struct drm_framebuffer *fb = plane_state->hw.fb;
12470 struct drm_format_name_buf format_name;
/* No framebuffer attached: shorter message, then done. */
12473 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
12474 plane->base.base.id, plane->base.name,
12475 yesno(plane_state->uapi.visible));
12479 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
12480 plane->base.base.id, plane->base.name,
12481 fb->base.id, fb->width, fb->height,
12482 drm_get_format_name(fb->format->format, &format_name),
12483 yesno(plane_state->uapi.visible));
12484 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
12485 plane_state->hw.rotation, plane_state->scaler_id);
12486 if (plane_state->uapi.visible)
12487 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
12488 DRM_RECT_FP_ARG(&plane_state->uapi.src),
12489 DRM_RECT_ARG(&plane_state->uapi.dst));
/*
 * Debug helper: dump a full CRTC state — outputs, transcoder, M/N
 * values, infoframes, modes/timings, scalers, panel fitter, DPLL,
 * color state and all planes on the pipe.  @context tags the log lines
 * with the caller's reason for dumping.
 */
12492 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
12493 struct intel_atomic_state *state,
12494 const char *context)
12496 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
12497 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12498 const struct intel_plane_state *plane_state;
12499 struct intel_plane *plane;
12503 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
12504 crtc->base.base.id, crtc->base.name,
12505 yesno(pipe_config->hw.enable), context);
/* A disabled pipe has nothing further worth dumping. */
12507 if (!pipe_config->hw.enable)
12510 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
12511 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
12512 yesno(pipe_config->hw.active),
12513 buf, pipe_config->output_types,
12514 output_formats(pipe_config->output_format));
12516 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
12517 transcoder_name(pipe_config->cpu_transcoder),
12518 pipe_config->pipe_bpp, pipe_config->dither);
12520 if (pipe_config->has_pch_encoder)
12521 intel_dump_m_n_config(pipe_config, "fdi",
12522 pipe_config->fdi_lanes,
12523 &pipe_config->fdi_m_n);
12525 if (intel_crtc_has_dp_encoder(pipe_config)) {
12526 intel_dump_m_n_config(pipe_config, "dp m_n",
12527 pipe_config->lane_count, &pipe_config->dp_m_n);
12528 if (pipe_config->has_drrs)
12529 intel_dump_m_n_config(pipe_config, "dp m2_n2",
12530 pipe_config->lane_count,
12531 &pipe_config->dp_m2_n2);
12534 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
12535 pipe_config->has_audio, pipe_config->has_infoframe,
12536 pipe_config->infoframes.enable);
/* Dump only the infoframe types actually enabled. */
12538 if (pipe_config->infoframes.enable &
12539 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
12540 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
12541 if (pipe_config->infoframes.enable &
12542 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
12543 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
12544 if (pipe_config->infoframes.enable &
12545 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
12546 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
12547 if (pipe_config->infoframes.enable &
12548 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
12549 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
12551 DRM_DEBUG_KMS("requested mode:\n");
12552 drm_mode_debug_printmodeline(&pipe_config->hw.mode);
12553 DRM_DEBUG_KMS("adjusted mode:\n");
12554 drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
12555 intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
12556 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
12557 pipe_config->port_clock,
12558 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
12559 pipe_config->pixel_rate);
12561 if (INTEL_GEN(dev_priv) >= 9)
12562 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12564 pipe_config->scaler_state.scaler_users,
12565 pipe_config->scaler_state.scaler_id);
12567 if (HAS_GMCH(dev_priv))
12568 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12569 pipe_config->gmch_pfit.control,
12570 pipe_config->gmch_pfit.pgm_ratios,
12571 pipe_config->gmch_pfit.lvds_border_bits);
12573 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
12574 pipe_config->pch_pfit.pos,
12575 pipe_config->pch_pfit.size,
12576 enableddisabled(pipe_config->pch_pfit.enabled),
12577 yesno(pipe_config->pch_pfit.force_thru));
12579 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
12580 pipe_config->ips_enabled, pipe_config->double_wide);
12582 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
/* CHV has a CGM block; other platforms use the CSC mode field. */
12584 if (IS_CHERRYVIEW(dev_priv))
12585 DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
12586 pipe_config->cgm_mode, pipe_config->gamma_mode,
12587 pipe_config->gamma_enable, pipe_config->csc_enable);
12589 DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
12590 pipe_config->csc_mode, pipe_config->gamma_mode,
12591 pipe_config->gamma_enable, pipe_config->csc_enable);
12597 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12598 if (plane->pipe == crtc->pipe)
12599 intel_dump_plane_state(plane_state);
/*
 * Validate that no digital port is claimed twice, and that MST and
 * SST/HDMI are not mixed on the same port, by accumulating per-port
 * bitmasks over all connectors.  NOTE(review): excerpt is elided; the
 * return statements and some case bodies are not visible here.
 */
12603 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
12605 struct drm_device *dev = state->base.dev;
12606 struct drm_connector *connector;
12607 struct drm_connector_list_iter conn_iter;
12608 unsigned int used_ports = 0;
12609 unsigned int used_mst_ports = 0;
12613 * We're going to peek into connector->state,
12614 * hence connection_mutex must be held.
12616 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
12619 * Walk the connector list instead of the encoder
12620 * list to detect the problem on ddi platforms
12621 * where there's just one encoder per digital port.
12623 drm_connector_list_iter_begin(dev, &conn_iter);
12624 drm_for_each_connector_iter(connector, &conn_iter) {
12625 struct drm_connector_state *connector_state;
12626 struct intel_encoder *encoder;
/* Prefer the new state; fall back to the current one. */
12629 drm_atomic_get_new_connector_state(&state->base,
12631 if (!connector_state)
12632 connector_state = connector->state;
12634 if (!connector_state->best_encoder)
12637 encoder = to_intel_encoder(connector_state->best_encoder);
12639 WARN_ON(!connector_state->crtc);
12641 switch (encoder->type) {
12642 unsigned int port_mask;
12643 case INTEL_OUTPUT_DDI:
12644 if (WARN_ON(!HAS_DDI(to_i915(dev))))
12646 /* else, fall through */
12647 case INTEL_OUTPUT_DP:
12648 case INTEL_OUTPUT_HDMI:
12649 case INTEL_OUTPUT_EDP:
12650 port_mask = 1 << encoder->port;
12652 /* the same port mustn't appear more than once */
12653 if (used_ports & port_mask)
12656 used_ports |= port_mask;
12658 case INTEL_OUTPUT_DP_MST:
12660 1 << encoder->port;
12666 drm_connector_list_iter_end(&conn_iter);
12668 /* can't mix MST and SST/HDMI on the same port */
12669 if (used_ports & used_mst_ports)
/*
 * Fastset (non-modeset) flavour of the uapi->hw state copy: only the color
 * management blobs are refreshed; mode/enable/active are left untouched.
 */
12676 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
12678 intel_crtc_copy_color_blobs(crtc_state);
/*
 * Full-modeset flavour of the uapi->hw state copy: mirror enable/active and
 * both modes from the uapi (user-visible) state into the hw (programmed)
 * state, then copy the color blobs via the nomodeset helper.
 */
12682 intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
12684 crtc_state->hw.enable = crtc_state->uapi.enable;
12685 crtc_state->hw.active = crtc_state->uapi.active;
12686 crtc_state->hw.mode = crtc_state->uapi.mode;
12687 crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
12688 intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
/*
 * Reverse copy: propagate the hw (programmed) state back into the uapi
 * (user-visible) state, including the mode blob and the color LUT/CTM blobs.
 * Used e.g. after hardware readout so userspace sees what is really running.
 */
12691 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
12693 crtc_state->uapi.enable = crtc_state->hw.enable;
12694 crtc_state->uapi.active = crtc_state->hw.active;
/* drm_atomic_set_mode_for_crtc() also (re)creates the uapi mode blob. */
12695 WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
12697 crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
12699 /* copy color blobs to uapi */
12700 drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
12701 crtc_state->hw.degamma_lut);
12702 drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
12703 crtc_state->hw.gamma_lut);
12704 drm_property_replace_blob(&crtc_state->uapi.ctm,
12705 crtc_state->hw.ctm);
/*
 * Reset a crtc_state to a clean slate before compute_config, preserving only
 * the fields that must survive (uapi state, scaler state, DPLL selections,
 * CRC enable, and on G4X/VLV/CHV the watermarks). A temporary zeroed copy is
 * built, the fields to keep are copied in, and the result is memcpy'd back.
 *
 * NOTE(review): the kzalloc() NULL-check lines appear to be elided from this
 * extract (embedded numbering jumps 12715 -> 12719) — confirm against the
 * original before assuming allocation failure is unhandled.
 */
12709 intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
12711 struct drm_i915_private *dev_priv =
12712 to_i915(crtc_state->uapi.crtc->dev);
12713 struct intel_crtc_state *saved_state;
12715 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
12719 /* free the old crtc_state->hw members */
12720 intel_crtc_free_hw_state(crtc_state);
12722 /* FIXME: before the switch to atomic started, a new pipe_config was
12723 * kzalloc'd. Code that depends on any field being zero should be
12724 * fixed, so that the crtc_state can be safely duplicated. For now,
12725 * only fields that are know to not cause problems are preserved. */
12727 saved_state->uapi = crtc_state->uapi;
12728 saved_state->scaler_state = crtc_state->scaler_state;
12729 saved_state->shared_dpll = crtc_state->shared_dpll;
12730 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12731 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
12732 sizeof(saved_state->icl_port_dplls));
12733 saved_state->crc_enabled = crtc_state->crc_enabled;
/* Watermarks are precomputed on these platforms, so keep them too. */
12734 if (IS_G4X(dev_priv) ||
12735 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12736 saved_state->wm = crtc_state->wm;
12738 * Save the slave bitmask which gets filled for master crtc state during
12739 * slave atomic check call.
12741 if (is_trans_port_sync_master(crtc_state))
12742 saved_state->sync_mode_slaves_mask =
12743 crtc_state->sync_mode_slaves_mask;
12745 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
12746 kfree(saved_state);
/* Re-seed the hw state from the preserved uapi state. */
12748 intel_crtc_copy_uapi_to_hw_state(crtc_state);
/*
 * Compute the full pipe configuration for a modeset: sanitize sync flags,
 * derive the base bpp and pipe source size, let each encoder adjust the
 * config, then fix up the CRTC itself — retrying the encoder pass once if
 * the CRTC asks for it (RETRY, e.g. bandwidth constrained).
 *
 * NOTE(review): this extract elides many control-flow lines (error returns,
 * 'continue' statements, the 'encoder_retry:' label); the embedded line
 * numbers show the gaps. Comments below describe only what is visible.
 */
12754 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
12756 struct drm_crtc *crtc = pipe_config->uapi.crtc;
12757 struct drm_atomic_state *state = pipe_config->uapi.state;
12758 struct intel_encoder *encoder;
12759 struct drm_connector *connector;
12760 struct drm_connector_state *connector_state;
/* Default: the cpu transcoder matches the pipe (may be overridden later). */
12765 pipe_config->cpu_transcoder =
12766 (enum transcoder) to_intel_crtc(crtc)->pipe;
12769 * Sanitize sync polarity flags based on requested ones. If neither
12770 * positive or negative polarity is requested, treat this as meaning
12771 * negative polarity.
12773 if (!(pipe_config->hw.adjusted_mode.flags &
12774 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12775 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12777 if (!(pipe_config->hw.adjusted_mode.flags &
12778 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12779 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12781 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
/* Remember the pre-adjustment bpp for the final debug print. */
12786 base_bpp = pipe_config->pipe_bpp;
12789 * Determine the real pipe dimensions. Note that stereo modes can
12790 * increase the actual pipe size due to the frame doubling and
12791 * insertion of additional space for blanks between the frame. This
12792 * is stored in the crtc timings. We use the requested mode to do this
12793 * computation to clearly distinguish it from the adjusted mode, which
12794 * can be changed by the connectors in the below retry loop.
12796 drm_mode_get_hv_timing(&pipe_config->hw.mode,
12797 &pipe_config->pipe_src_w,
12798 &pipe_config->pipe_src_h);
12800 for_each_new_connector_in_state(state, connector, connector_state, i) {
12801 if (connector_state->crtc != crtc)
12804 encoder = to_intel_encoder(connector_state->best_encoder);
12806 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12807 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12812 * Determine output_types before calling the .compute_config()
12813 * hooks so that the hooks can use this information safely.
12815 if (encoder->compute_output_type)
12816 pipe_config->output_types |=
12817 BIT(encoder->compute_output_type(encoder, pipe_config,
12820 pipe_config->output_types |= BIT(encoder->type);
12824 /* Ensure the port clock defaults are reset when retrying. */
12825 pipe_config->port_clock = 0;
12826 pipe_config->pixel_multiplier = 1;
12828 /* Fill in default crtc timings, allow encoders to overwrite them. */
12829 drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
12830 CRTC_STEREO_DOUBLE);
12832 /* Set the crtc_state defaults for trans_port_sync */
12833 pipe_config->master_transcoder = INVALID_TRANSCODER;
12834 ret = icl_add_sync_mode_crtcs(pipe_config);
12836 DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
12841 /* Pass our mode to the connectors and the CRTC to give them a chance to
12842 * adjust it according to limitations or connector properties, and also
12843 * a chance to reject the mode entirely.
12845 for_each_new_connector_in_state(state, connector, connector_state, i) {
12846 if (connector_state->crtc != crtc)
12849 encoder = to_intel_encoder(connector_state->best_encoder);
12850 ret = encoder->compute_config(encoder, pipe_config,
/* -EDEADLK means "retry the whole transaction" — stay quiet for it. */
12853 if (ret != -EDEADLK)
12854 DRM_DEBUG_KMS("Encoder config failure: %d\n",
12860 /* Set default port clock if not overwritten by the encoder. Needs to be
12861 * done afterwards in case the encoder adjusts the mode. */
12862 if (!pipe_config->port_clock)
12863 pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
12864 * pipe_config->pixel_multiplier;
12866 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12867 if (ret == -EDEADLK)
12870 DRM_DEBUG_KMS("CRTC fixup failed\n");
/* CRTC asked for another encoder pass; WARN guards against livelock. */
12874 if (ret == RETRY) {
12875 if (WARN(!retry, "loop in pipe configuration computation\n"))
12878 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12880 goto encoder_retry;
12883 /* Dithering seems to not pass-through bits correctly when it should, so
12884 * only enable it on 6bpc panels and when its not a compliance
12885 * test requesting 6bpc video pattern.
12887 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12888 !pipe_config->dither_force_disable;
12889 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12890 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12893 * Make drm_calc_timestamping_constants in
12894 * drm_atomic_helper_update_legacy_modeset_state() happy
12896 pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
/*
 * intel_fuzzy_clock_check - compare two clock frequencies with tolerance.
 * @clock1: first clock in kHz
 * @clock2: second clock in kHz
 *
 * Returns true when the clocks are identical, or when both are non-zero and
 * their difference is within roughly 5% of their mean (the inequality below
 * is equivalent to diff < 5% of (clock1 + clock2) / 2, using integer math).
 * A zero clock only matches another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	/* Zero means "no clock"; it cannot fuzzily match a real clock. */
	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	return ((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
/*
 * Compare two M/N link ratios. Exact equality always matches; in non-exact
 * mode the ratios are cross-scaled so that fuzzy comparison is possible.
 *
 * NOTE(review): several interior lines are elided from this extract
 * (embedded numbering jumps 12930 -> 12937 -> 12947), so the scaling
 * branches are only partially visible.
 */
12920 intel_compare_m_n(unsigned int m, unsigned int n,
12921 unsigned int m2, unsigned int n2,
12924 if (m == m2 && n == n2)
/* In exact mode, or when any term is zero, only literal equality counts. */
12927 if (exact || !m || !n || !m2 || !n2)
/* Cross-scaling below must not overflow a signed int. */
12930 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX)
12937 } else if (n < n2) {
/* After scaling to a common denominator, compare fuzzily like clocks. */
12947 return intel_fuzzy_clock_check(m, m2);
/*
 * Compare two intel_link_m_n structures: the TU must match exactly, and the
 * gmch and link M/N ratios must match per intel_compare_m_n() (exact or
 * fuzzy, per the elided 'exact' flag parameter).
 */
12951 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12952 const struct intel_link_m_n *m2_n2,
12955 return m_n->tu == m2_n2->tu &&
12956 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12957 m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12958 intel_compare_m_n(m_n->link_m, m_n->link_n,
12959 m2_n2->link_m, m2_n2->link_n, exact);
12963 intel_compare_infoframe(const union hdmi_infoframe *a,
12964 const union hdmi_infoframe *b)
12966 return memcmp(a, b, sizeof(*a)) == 0;
/*
 * Report an infoframe mismatch between expected and found state, dumping
 * both frames. During a fastset check this is only debug output (and is
 * skipped entirely unless KMS debugging is enabled); otherwise it is an
 * error. The elided lines split the two branches on 'fastset'.
 */
12970 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
12971 bool fastset, const char *name,
12972 const union hdmi_infoframe *a,
12973 const union hdmi_infoframe *b)
/* Fastset path: bail out early if KMS debug logging is off. */
12976 if ((drm_debug & DRM_UT_KMS) == 0)
12979 DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
12980 DRM_DEBUG_KMS("expected:\n");
12981 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12982 DRM_DEBUG_KMS("found:\n");
12983 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
/* Non-fastset path: a genuine state mismatch is an error. */
12985 DRM_ERROR("mismatch in %s infoframe\n", name);
12986 DRM_ERROR("expected:\n");
12987 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12988 DRM_ERROR("found:\n");
12989 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
/*
 * printf-style helper used by the PIPE_CONF_CHECK_* macros to report a
 * single mismatched field: debug-level during fastset checks, error-level
 * otherwise. The __printf(4, 5) attribute lets the compiler type-check the
 * format arguments. (The va_end() call is elided from this extract.)
 */
12993 static void __printf(4, 5)
12994 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
12995 const char *name, const char *format, ...)
12997 struct va_format vaf;
13000 va_start(args, format);
13005 DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
13006 crtc->base.base.id, crtc->base.name, name, &vaf);
13008 DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
13009 crtc->base.base.id, crtc->base.name, name, &vaf);
/*
 * Decide whether fastboot (skipping the initial modeset when the BIOS
 * config matches) is in effect: an explicit i915.fastboot modparam wins;
 * otherwise it defaults on for gen9+ and VLV/CHV, off elsewhere. (The
 * 'return true/false' lines are elided from this extract.)
 */
13014 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
/* -1 means "auto"; any other value is an explicit user override. */
13016 if (i915_modparams.fastboot != -1)
13017 return i915_modparams.fastboot;
13019 /* Enable fastboot by default on Skylake and newer */
13020 if (INTEL_GEN(dev_priv) >= 9)
13023 /* Enable fastboot by default on VLV and CHV */
13024 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13027 /* Disabled by default on all others */
/*
 * Compare two pipe configurations field by field (sw state vs hw readout,
 * or old vs new for a fastset check). A family of PIPE_CONF_CHECK_* macros
 * does the per-field comparison and reports mismatches through
 * pipe_config_mismatch(); each macro presumably also clears the overall
 * 'ret' on mismatch (those lines are elided from this extract, as are the
 * macros' do/while(0) closers and the final return).
 */
13032 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
13033 const struct intel_crtc_state *pipe_config,
13036 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
13037 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
/*
 * "Fixing up inherited state": fastset against a BIOS-inherited config
 * that the new state no longer marks as inherited.
 */
13040 bool fixup_inherited = fastset &&
13041 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
13042 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
13044 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
13045 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
/* Compare a field, reporting hex values on mismatch. */
13049 #define PIPE_CONF_CHECK_X(name) do { \
13050 if (current_config->name != pipe_config->name) { \
13051 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13052 "(expected 0x%08x, found 0x%08x)", \
13053 current_config->name, \
13054 pipe_config->name); \
/* Compare a field, reporting decimal values on mismatch. */
13059 #define PIPE_CONF_CHECK_I(name) do { \
13060 if (current_config->name != pipe_config->name) { \
13061 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13062 "(expected %i, found %i)", \
13063 current_config->name, \
13064 pipe_config->name); \
/* Compare a boolean field, reporting yes/no on mismatch. */
13069 #define PIPE_CONF_CHECK_BOOL(name) do { \
13070 if (current_config->name != pipe_config->name) { \
13071 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13072 "(expected %s, found %s)", \
13073 yesno(current_config->name), \
13074 yesno(pipe_config->name)); \
13080 * Checks state where we only read out the enabling, but not the entire
13081 * state itself (like full infoframes or ELD for audio). These states
13082 * require a full modeset on bootup to fix up.
13084 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13085 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13086 PIPE_CONF_CHECK_BOOL(name); \
13088 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13089 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13090 yesno(current_config->name), \
13091 yesno(pipe_config->name)); \
/* Compare a pointer field by identity. */
13096 #define PIPE_CONF_CHECK_P(name) do { \
13097 if (current_config->name != pipe_config->name) { \
13098 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13099 "(expected %p, found %p)", \
13100 current_config->name, \
13101 pipe_config->name); \
/* Compare an intel_link_m_n field via intel_compare_link_m_n(). */
13106 #define PIPE_CONF_CHECK_M_N(name) do { \
13107 if (!intel_compare_link_m_n(&current_config->name, \
13108 &pipe_config->name,\
13110 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13111 "(expected tu %i gmch %i/%i link %i/%i, " \
13112 "found tu %i, gmch %i/%i link %i/%i)", \
13113 current_config->name.tu, \
13114 current_config->name.gmch_m, \
13115 current_config->name.gmch_n, \
13116 current_config->name.link_m, \
13117 current_config->name.link_n, \
13118 pipe_config->name.tu, \
13119 pipe_config->name.gmch_m, \
13120 pipe_config->name.gmch_n, \
13121 pipe_config->name.link_m, \
13122 pipe_config->name.link_n); \
13127 /* This is required for BDW+ where there is only one set of registers for
13128 * switching between high and low RR.
13129 * This macro can be used whenever a comparison has to be made between one
13130 * hw state and multiple sw state variables.
13132 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13133 if (!intel_compare_link_m_n(&current_config->name, \
13134 &pipe_config->name, !fastset) && \
13135 !intel_compare_link_m_n(&current_config->alt_name, \
13136 &pipe_config->name, !fastset)) { \
13137 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13138 "(expected tu %i gmch %i/%i link %i/%i, " \
13139 "or tu %i gmch %i/%i link %i/%i, " \
13140 "found tu %i, gmch %i/%i link %i/%i)", \
13141 current_config->name.tu, \
13142 current_config->name.gmch_m, \
13143 current_config->name.gmch_n, \
13144 current_config->name.link_m, \
13145 current_config->name.link_n, \
13146 current_config->alt_name.tu, \
13147 current_config->alt_name.gmch_m, \
13148 current_config->alt_name.gmch_n, \
13149 current_config->alt_name.link_m, \
13150 current_config->alt_name.link_n, \
13151 pipe_config->name.tu, \
13152 pipe_config->name.gmch_m, \
13153 pipe_config->name.gmch_n, \
13154 pipe_config->name.link_m, \
13155 pipe_config->name.link_n); \
/* Compare only the bits selected by 'mask'. */
13160 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13161 if ((current_config->name ^ pipe_config->name) & (mask)) { \
13162 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13163 "(%x) (expected %i, found %i)", \
13165 current_config->name & (mask), \
13166 pipe_config->name & (mask)); \
/* Compare clocks with ~5% tolerance via intel_fuzzy_clock_check(). */
13171 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13172 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13173 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13174 "(expected %i, found %i)", \
13175 current_config->name, \
13176 pipe_config->name); \
/* Compare one infoframe out of crtc_state->infoframes. */
13181 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13182 if (!intel_compare_infoframe(&current_config->infoframes.name, \
13183 &pipe_config->infoframes.name)) { \
13184 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13185 &current_config->infoframes.name, \
13186 &pipe_config->infoframes.name); \
/* Compare a color LUT: mode must match, then LUT contents at the given precision. */
13191 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13192 if (current_config->name1 != pipe_config->name1) { \
13193 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13194 "(expected %i, found %i, won't compare lut values)", \
13195 current_config->name1, \
13196 pipe_config->name1); \
13199 if (!intel_color_lut_equal(current_config->name2, \
13200 pipe_config->name2, pipe_config->name1, \
13201 bit_precision)) { \
13202 pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13203 "hw_state doesn't match sw_state"); \
/* True when either config carries the given quirk. */
13209 #define PIPE_CONF_QUIRK(quirk) \
13210 ((current_config->quirks | pipe_config->quirks) & (quirk))
13212 PIPE_CONF_CHECK_I(cpu_transcoder);
13214 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13215 PIPE_CONF_CHECK_I(fdi_lanes);
13216 PIPE_CONF_CHECK_M_N(fdi_m_n);
13218 PIPE_CONF_CHECK_I(lane_count);
13219 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
/* Pre-BDW has separate M/N registers; BDW+ shares one set (see _ALT above). */
13221 if (INTEL_GEN(dev_priv) < 8) {
13222 PIPE_CONF_CHECK_M_N(dp_m_n);
13224 if (current_config->has_drrs)
13225 PIPE_CONF_CHECK_M_N(dp_m2_n2);
13227 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13229 PIPE_CONF_CHECK_X(output_types);
13231 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13232 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13233 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13234 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13235 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13236 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13238 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13239 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13240 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13241 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13242 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13243 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13245 PIPE_CONF_CHECK_I(pixel_multiplier);
13246 PIPE_CONF_CHECK_I(output_format);
13247 PIPE_CONF_CHECK_I(dc3co_exitline);
13248 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13249 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13250 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13251 PIPE_CONF_CHECK_BOOL(limited_color_range);
13253 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13254 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13255 PIPE_CONF_CHECK_BOOL(has_infoframe);
13256 PIPE_CONF_CHECK_BOOL(fec_enable);
13258 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13260 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13261 DRM_MODE_FLAG_INTERLACE);
13263 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13264 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13265 DRM_MODE_FLAG_PHSYNC);
13266 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13267 DRM_MODE_FLAG_NHSYNC);
13268 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13269 DRM_MODE_FLAG_PVSYNC);
13270 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13271 DRM_MODE_FLAG_NVSYNC);
13274 PIPE_CONF_CHECK_X(gmch_pfit.control);
13275 /* pfit ratios are autocomputed by the hw on gen4+ */
13276 if (INTEL_GEN(dev_priv) < 4)
13277 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13278 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13281 * Changing the EDP transcoder input mux
13282 * (A_ONOFF vs. A_ON) requires a full modeset.
13284 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13287 PIPE_CONF_CHECK_I(pipe_src_w);
13288 PIPE_CONF_CHECK_I(pipe_src_h);
13290 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13291 if (current_config->pch_pfit.enabled) {
13292 PIPE_CONF_CHECK_X(pch_pfit.pos);
13293 PIPE_CONF_CHECK_X(pch_pfit.size);
13296 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13297 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13299 PIPE_CONF_CHECK_X(gamma_mode);
13300 if (IS_CHERRYVIEW(dev_priv))
13301 PIPE_CONF_CHECK_X(cgm_mode);
13303 PIPE_CONF_CHECK_X(csc_mode);
13304 PIPE_CONF_CHECK_BOOL(gamma_enable);
13305 PIPE_CONF_CHECK_BOOL(csc_enable);
13307 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13309 PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13313 PIPE_CONF_CHECK_BOOL(double_wide);
13315 PIPE_CONF_CHECK_P(shared_dpll);
13316 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13317 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13318 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13319 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13320 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13321 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13322 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13323 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13324 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13325 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13326 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13327 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13328 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13329 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13330 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13331 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13332 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13333 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13334 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13335 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13336 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13337 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13338 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13339 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13340 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13341 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13342 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13343 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13344 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13345 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13346 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13348 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13349 PIPE_CONF_CHECK_X(dsi_pll.div);
13351 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13352 PIPE_CONF_CHECK_I(pipe_bpp);
13354 PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
13355 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13357 PIPE_CONF_CHECK_I(min_voltage_level);
13359 PIPE_CONF_CHECK_X(infoframes.enable);
13360 PIPE_CONF_CHECK_X(infoframes.gcp);
13361 PIPE_CONF_CHECK_INFOFRAME(avi);
13362 PIPE_CONF_CHECK_INFOFRAME(spd);
13363 PIPE_CONF_CHECK_INFOFRAME(hdmi);
13364 PIPE_CONF_CHECK_INFOFRAME(drm);
13366 PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
13367 PIPE_CONF_CHECK_I(master_transcoder);
/* Scrub the helper macros so they cannot leak past this function. */
13369 #undef PIPE_CONF_CHECK_X
13370 #undef PIPE_CONF_CHECK_I
13371 #undef PIPE_CONF_CHECK_BOOL
13372 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13373 #undef PIPE_CONF_CHECK_P
13374 #undef PIPE_CONF_CHECK_FLAGS
13375 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13376 #undef PIPE_CONF_CHECK_COLOR_LUT
13377 #undef PIPE_CONF_QUIRK
/*
 * Cross-check the dotclock implied by the FDI M/N values against the one
 * the encoders computed; they should fuzzily agree when a PCH encoder is
 * in use. Disagreement indicates a config-computation bug, hence WARN.
 */
13382 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13383 const struct intel_crtc_state *pipe_config)
13385 if (pipe_config->has_pch_encoder) {
13386 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13387 &pipe_config->fdi_m_n);
13388 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13391 * FDI already provided one idea for the dotclock.
13392 * Yell if the encoder disagrees.
13394 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13395 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13396 fdi_dotclock, dotclock);
/*
 * Verify that the software-tracked SKL+ watermark and DDB allocation state
 * matches what is actually programmed in hardware, for every universal
 * plane and the cursor on this pipe. Gen9+ only, and only for active crtcs.
 * Mismatches are reported via DRM_ERROR; nothing is corrected here.
 *
 * NOTE(review): error-path and loop-control lines are elided from this
 * extract (discontinuous embedded numbering) — e.g. the kzalloc() failure
 * return and the final kfree(hw) are not visible.
 */
13400 static void verify_wm_state(struct intel_crtc *crtc,
13401 struct intel_crtc_state *new_crtc_state)
13403 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Heap-allocated scratch for the hardware readout (too big for the stack). */
13404 struct skl_hw_state {
13405 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
13406 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
13407 struct skl_ddb_allocation ddb;
13408 struct skl_pipe_wm wm;
13410 struct skl_ddb_allocation *sw_ddb;
13411 struct skl_pipe_wm *sw_wm;
13412 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
13413 const enum pipe pipe = crtc->pipe;
13414 int plane, level, max_level = ilk_wm_max_level(dev_priv);
/* Pre-gen9 has no SKL-style DDB; inactive pipes have nothing to verify. */
13416 if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
13419 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
/* Read back the programmed watermarks and DDB allocations. */
13423 skl_pipe_wm_get_hw_state(crtc, &hw->wm);
13424 sw_wm = &new_crtc_state->wm.skl.optimal;
13426 skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
13428 skl_ddb_get_hw_state(dev_priv, &hw->ddb);
13429 sw_ddb = &dev_priv->wm.skl_hw.ddb;
13431 if (INTEL_GEN(dev_priv) >= 11 &&
13432 hw->ddb.enabled_slices != sw_ddb->enabled_slices)
13433 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
13434 sw_ddb->enabled_slices,
13435 hw->ddb.enabled_slices);
/* Per-plane: compare every WM level, the transition WM, and the DDB entry. */
13438 for_each_universal_plane(dev_priv, pipe, plane) {
13439 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13441 hw_plane_wm = &hw->wm.planes[plane];
13442 sw_plane_wm = &sw_wm->planes[plane];
13445 for (level = 0; level <= max_level; level++) {
13446 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13447 &sw_plane_wm->wm[level]))
13450 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13451 pipe_name(pipe), plane + 1, level,
13452 sw_plane_wm->wm[level].plane_en,
13453 sw_plane_wm->wm[level].plane_res_b,
13454 sw_plane_wm->wm[level].plane_res_l,
13455 hw_plane_wm->wm[level].plane_en,
13456 hw_plane_wm->wm[level].plane_res_b,
13457 hw_plane_wm->wm[level].plane_res_l);
13460 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13461 &sw_plane_wm->trans_wm)) {
13462 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13463 pipe_name(pipe), plane + 1,
13464 sw_plane_wm->trans_wm.plane_en,
13465 sw_plane_wm->trans_wm.plane_res_b,
13466 sw_plane_wm->trans_wm.plane_res_l,
13467 hw_plane_wm->trans_wm.plane_en,
13468 hw_plane_wm->trans_wm.plane_res_b,
13469 hw_plane_wm->trans_wm.plane_res_l);
13473 hw_ddb_entry = &hw->ddb_y[plane];
13474 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
13476 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13477 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
13478 pipe_name(pipe), plane + 1,
13479 sw_ddb_entry->start, sw_ddb_entry->end,
13480 hw_ddb_entry->start, hw_ddb_entry->end);
13486 * If the cursor plane isn't active, we may not have updated it's ddb
13487 * allocation. In that case since the ddb allocation will be updated
13488 * once the plane becomes visible, we can skip this check
13491 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13493 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
13494 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
13497 for (level = 0; level <= max_level; level++) {
13498 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13499 &sw_plane_wm->wm[level]))
13502 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13503 pipe_name(pipe), level,
13504 sw_plane_wm->wm[level].plane_en,
13505 sw_plane_wm->wm[level].plane_res_b,
13506 sw_plane_wm->wm[level].plane_res_l,
13507 hw_plane_wm->wm[level].plane_en,
13508 hw_plane_wm->wm[level].plane_res_b,
13509 hw_plane_wm->wm[level].plane_res_l);
13512 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13513 &sw_plane_wm->trans_wm)) {
13514 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13516 sw_plane_wm->trans_wm.plane_en,
13517 sw_plane_wm->trans_wm.plane_res_b,
13518 sw_plane_wm->trans_wm.plane_res_l,
13519 hw_plane_wm->trans_wm.plane_en,
13520 hw_plane_wm->trans_wm.plane_res_b,
13521 hw_plane_wm->trans_wm.plane_res_l);
13525 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
13526 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
13528 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13529 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
13531 sw_ddb_entry->start, sw_ddb_entry->end,
13532 hw_ddb_entry->start, hw_ddb_entry->end);
/*
 * Verify every connector in the atomic state that is routed to the given
 * crtc: its state must be internally consistent and its atomic best_encoder
 * must match the legacy connector->encoder pointer.
 */
13540 verify_connector_state(struct intel_atomic_state *state,
13541 struct intel_crtc *crtc)
13543 struct drm_connector *connector;
13544 struct drm_connector_state *new_conn_state;
13547 for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13548 struct drm_encoder *encoder = connector->encoder;
13549 struct intel_crtc_state *crtc_state = NULL;
/* Only connectors driven by this crtc are of interest. */
13551 if (new_conn_state->crtc != &crtc->base)
13555 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13557 intel_connector_verify_state(crtc_state, new_conn_state);
13559 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13560 "connector's atomic encoder doesn't match legacy encoder\n");
/*
 * Verify every encoder's software state against the connectors referencing
 * it in the atomic state, and against its hardware enable state: an encoder
 * with no attached crtc must read back as disabled.
 */
13565 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
13567 struct intel_encoder *encoder;
13568 struct drm_connector *connector;
13569 struct drm_connector_state *old_conn_state, *new_conn_state;
13572 for_each_intel_encoder(&dev_priv->drm, encoder) {
/* 'found': some connector references this encoder (old or new state);
 * 'enabled': a new-state connector actively uses it. */
13573 bool enabled = false, found = false;
13576 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
13577 encoder->base.base.id,
13578 encoder->base.name);
13580 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
13581 new_conn_state, i) {
13582 if (old_conn_state->best_encoder == &encoder->base)
13585 if (new_conn_state->best_encoder != &encoder->base)
13587 found = enabled = true;
13589 I915_STATE_WARN(new_conn_state->crtc !=
13590 encoder->base.crtc,
13591 "connector's crtc doesn't match encoder crtc\n");
13597 I915_STATE_WARN(!!encoder->base.crtc != enabled,
13598 "encoder's enabled state mismatch "
13599 "(expected %i, found %i)\n",
13600 !!encoder->base.crtc, enabled);
/* Detached encoders must not still be enabled in hardware. */
13602 if (!encoder->base.crtc) {
13605 active = encoder->get_hw_state(encoder, &pipe);
13606 I915_STATE_WARN(active,
13607 "encoder detached but still enabled on pipe %c.\n",
/*
 * Read the pipe configuration back from hardware into the (recycled)
 * old_crtc_state and compare it against the expected new_crtc_state:
 * active state, per-encoder hw state, and the full field-by-field
 * intel_pipe_config_compare(). Mismatches trigger I915_STATE_WARN and a
 * dump of both configs.
 */
13614 verify_crtc_state(struct intel_crtc *crtc,
13615 struct intel_crtc_state *old_crtc_state,
13616 struct intel_crtc_state *new_crtc_state)
13618 struct drm_device *dev = crtc->base.dev;
13619 struct drm_i915_private *dev_priv = to_i915(dev);
13620 struct intel_encoder *encoder;
13621 struct intel_crtc_state *pipe_config;
13622 struct drm_atomic_state *state;
/* Reuse old_crtc_state's storage for the hardware readout: free its
 * members, zero it, and restore the crtc/state back-pointers. */
13625 state = old_crtc_state->uapi.state;
13626 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
13627 intel_crtc_free_hw_state(old_crtc_state);
13629 pipe_config = old_crtc_state;
13630 memset(pipe_config, 0, sizeof(*pipe_config));
13631 pipe_config->uapi.crtc = &crtc->base;
13632 pipe_config->uapi.state = state;
13634 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
13636 active = dev_priv->display.get_pipe_config(crtc, pipe_config);
13638 /* we keep both pipes enabled on 830 */
13639 if (IS_I830(dev_priv))
13640 active = new_crtc_state->hw.active;
13642 I915_STATE_WARN(new_crtc_state->hw.active != active,
13643 "crtc active state doesn't match with hw state "
13644 "(expected %i, found %i)\n",
13645 new_crtc_state->hw.active, active);
13647 I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
13648 "transitional active state does not match atomic hw state "
13649 "(expected %i, found %i)\n",
13650 new_crtc_state->hw.active, crtc->active);
13652 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
13655 active = encoder->get_hw_state(encoder, &pipe);
13656 I915_STATE_WARN(active != new_crtc_state->hw.active,
13657 "[ENCODER:%i] active %i with crtc active %i\n",
13658 encoder->base.base.id, active,
13659 new_crtc_state->hw.active);
13661 I915_STATE_WARN(active && crtc->pipe != pipe,
13662 "Encoder connected to wrong pipe %c\n",
/* Let each enabled encoder fill in its part of the readout config. */
13666 encoder->get_config(encoder, pipe_config);
13669 intel_crtc_compute_pixel_rate(pipe_config);
/* Inactive crtcs have nothing further to compare. */
13671 if (!new_crtc_state->hw.active)
13674 intel_pipe_config_sanity_check(dev_priv, pipe_config);
13676 if (!intel_pipe_config_compare(new_crtc_state,
13677 pipe_config, false)) {
13678 I915_STATE_WARN(1, "pipe state doesn't match!\n");
13679 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
13680 intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
/*
 * Assert that every plane in the atomic state is either visible or a
 * planar (NV12 UV) slave; assert_plane() flags any inconsistency.
 */
13685 intel_verify_planes(struct intel_atomic_state *state)
13687 struct intel_plane *plane;
13688 const struct intel_plane_state *plane_state;
13691 for_each_new_intel_plane_in_state(state, plane,
13693 assert_plane(plane, plane_state->planar_slave ||
13694 plane_state->uapi.visible);
/*
 * Verify one shared DPLL's software tracking against hardware: on/off
 * state, active-crtc mask vs reference mask, this crtc's membership in
 * both masks (direction depends on whether the crtc is active), and the
 * saved hw_state against a fresh readback.
 */
13698 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13699 struct intel_shared_dpll *pll,
13700 struct intel_crtc *crtc,
13701 struct intel_crtc_state *new_crtc_state)
13703 struct intel_dpll_hw_state dpll_hw_state;
13704 unsigned int crtc_mask;
13707 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13709 DRM_DEBUG_KMS("%s\n", pll->info->name);
13711 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
/* Always-on PLLs are exempt from the on/off consistency checks. */
13713 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
13714 I915_STATE_WARN(!pll->on && pll->active_mask,
13715 "pll in active use but not on in sw tracking\n");
13716 I915_STATE_WARN(pll->on && !pll->active_mask,
13717 "pll is on but not used by any active crtc\n");
13718 I915_STATE_WARN(pll->on != active,
13719 "pll on state mismatch (expected %i, found %i)\n",
/* Every active user must also hold a reference. */
13724 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
13725 "more active pll users than references: %x vs %x\n",
13726 pll->active_mask, pll->state.crtc_mask);
13731 crtc_mask = drm_crtc_mask(&crtc->base);
13733 if (new_crtc_state->hw.active)
13734 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13735 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13736 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13738 I915_STATE_WARN(pll->active_mask & crtc_mask,
13739 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13740 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13742 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
13743 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13744 crtc_mask, pll->state.crtc_mask);
13746 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
13748 sizeof(dpll_hw_state)),
13749 "pll hw state mismatch\n");
13753 verify_shared_dpll_state(struct intel_crtc *crtc,
13754 struct intel_crtc_state *old_crtc_state,
13755 struct intel_crtc_state *new_crtc_state)
13757 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13759 if (new_crtc_state->shared_dpll)
13760 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13762 if (old_crtc_state->shared_dpll &&
13763 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13764 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13765 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13767 I915_STATE_WARN(pll->active_mask & crtc_mask,
13768 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13769 pipe_name(drm_crtc_index(&crtc->base)));
13770 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13771 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13772 pipe_name(drm_crtc_index(&crtc->base)));
/*
 * Run the full post-commit state-checker suite (watermarks, connectors,
 * crtc state, shared DPLLs) for one crtc. Skipped entirely for commits
 * that are neither a modeset nor a fastset (update_pipe).
 */
13777 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13778 struct intel_atomic_state *state,
13779 struct intel_crtc_state *old_crtc_state,
13780 struct intel_crtc_state *new_crtc_state)
13782 if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13785 verify_wm_state(crtc, new_crtc_state);
13786 verify_connector_state(state, crtc);
13787 verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13788 verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
/*
 * Verify every shared DPLL with no crtc association (NULL crtc/state),
 * i.e. the "disabled" pass over all PLLs on the device.
 */
13792 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13796 for (i = 0; i < dev_priv->num_shared_dpll; i++)
13797 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
/*
 * Global (non-per-crtc) state checks run once per modeset: encoder
 * states, connectors with no crtc, and all unreferenced DPLLs.
 */
13801 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
13802 struct intel_atomic_state *state)
13804 verify_encoder_state(dev_priv, state);
13805 verify_connector_state(state, NULL);
13806 verify_disabled_dpll_state(dev_priv);
/*
 * Recompute vblank timestamping constants and the platform-dependent
 * scanline counter offset for a crtc that is (about to be) active.
 * The long comment below explains why the offset is 2 (HSW+ HDMI),
 * vtotal-1 (gen2) or 1 (everything else).
 */
13810 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
13812 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13813 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13814 const struct drm_display_mode *adjusted_mode =
13815 &crtc_state->hw.adjusted_mode;
13817 drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
13820 * The scanline counter increments at the leading edge of hsync.
13822 * On most platforms it starts counting from vtotal-1 on the
13823 * first active line. That means the scanline counter value is
13824 * always one less than what we would expect. Ie. just after
13825 * start of vblank, which also occurs at start of hsync (on the
13826 * last active line), the scanline counter will read vblank_start-1.
13828 * On gen2 the scanline counter starts counting from 1 instead
13829 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13830 * to keep the value positive), instead of adding one.
13832 * On HSW+ the behaviour of the scanline counter depends on the output
13833 * type. For DP ports it behaves like most other platforms, but on HDMI
13834 * there's an extra 1 line difference. So we need to add two instead of
13835 * one to the value.
13837 * On VLV/CHV DSI the scanline counter would appear to increment
13838 * approx. 1/3 of a scanline before start of vblank. Unfortunately
13839 * that means we can't tell whether we're in vblank or not while
13840 * we're on that particular line. We must still set scanline_offset
13841 * to 1 so that the vblank timestamps come out correct when we query
13842 * the scanline counter from within the vblank interrupt handler.
13843 * However if queried just before the start of vblank we'll get an
13844 * answer that's slightly in the future.
13846 if (IS_GEN(dev_priv, 2)) {
13849 vtotal = adjusted_mode->crtc_vtotal;
/* NOTE(review): interlaced handling of vtotal sits on an elided line. */
13850 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13853 crtc->scanline_offset = vtotal - 1;
13854 } else if (HAS_DDI(dev_priv) &&
13855 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13856 crtc->scanline_offset = 2;
13858 crtc->scanline_offset = 1;
/*
 * Release shared-DPLL references for every crtc undergoing a full
 * modeset, so PLL assignment can be recomputed. No-op on platforms
 * without a crtc_compute_clock hook (no shared PLL management).
 */
13862 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13864 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13865 struct intel_crtc_state *new_crtc_state;
13866 struct intel_crtc *crtc;
13869 if (!dev_priv->display.crtc_compute_clock)
13872 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13873 if (!needs_modeset(new_crtc_state))
13876 intel_release_shared_dplls(state, crtc);
13881 * This implements the workaround described in the "notes" section of the mode
13882 * set sequence documentation. When going from no pipes or single pipe to
13883 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13884 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13886 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
13888 struct intel_crtc_state *crtc_state;
13889 struct intel_crtc *crtc;
13890 struct intel_crtc_state *first_crtc_state = NULL;
13891 struct intel_crtc_state *other_crtc_state = NULL;
13892 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13895 /* look at all crtc's that are going to be enabled during the modeset */
13896 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13897 if (!crtc_state->hw.active ||
13898 !needs_modeset(crtc_state))
13901 if (first_crtc_state) {
13902 other_crtc_state = crtc_state;
13905 first_crtc_state = crtc_state;
13906 first_pipe = crtc->pipe;
13910 /* No workaround needed? */
13911 if (!first_crtc_state)
13914 /* w/a possibly needed, check how many crtc's are already enabled. */
13915 for_each_intel_crtc(state->base.dev, crtc) {
13916 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13917 if (IS_ERR(crtc_state))
13918 return PTR_ERR(crtc_state);
/* Default: no workaround pipe; set below only where needed. */
13920 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
13922 if (!crtc_state->hw.active ||
13923 needs_modeset(crtc_state))
13926 /* 2 or more enabled crtcs means no need for w/a */
13927 if (enabled_pipe != INVALID_PIPE)
13930 enabled_pipe = crtc->pipe;
/*
 * Exactly one pipe already enabled: the newly enabled crtc waits on it.
 * Otherwise, if two crtcs are being enabled together, the second waits
 * on the first.
 */
13933 if (enabled_pipe != INVALID_PIPE)
13934 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13935 else if (other_crtc_state)
13936 other_crtc_state->hsw_workaround_pipe = first_pipe;
/*
 * Modeset-only check phase: recompute the active-pipes bitmask and the
 * logical/actual cdclk state, lock global state when the set of active
 * pipes changes, drop stale PLL references, and apply the HSW planes
 * workaround. Returns 0 or a negative error code.
 */
13941 static int intel_modeset_checks(struct intel_atomic_state *state)
13943 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13944 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13945 struct intel_crtc *crtc;
13948 /* keep the current setting */
13949 if (!state->cdclk.force_min_cdclk_changed)
13950 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
13952 state->modeset = true;
13953 state->active_pipes = dev_priv->active_pipes;
13954 state->cdclk.logical = dev_priv->cdclk.logical;
13955 state->cdclk.actual = dev_priv->cdclk.actual;
13957 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13958 new_crtc_state, i) {
13959 if (new_crtc_state->hw.active)
13960 state->active_pipes |= BIT(crtc->pipe);
13962 state->active_pipes &= ~BIT(crtc->pipe);
13964 if (old_crtc_state->hw.active != new_crtc_state->hw.active)
13965 state->active_pipe_changes |= BIT(crtc->pipe);
/* Changing the set of active pipes affects global (cross-crtc) state. */
13968 if (state->active_pipe_changes) {
13969 ret = intel_atomic_lock_global_state(state);
13974 ret = intel_modeset_calc_cdclk(state);
13978 intel_modeset_clear_plls(state);
13980 if (IS_HASWELL(dev_priv))
13981 return haswell_mode_set_planes_workaround(state);
13987 * Handle calculation of various watermark data at the end of the atomic check
13988 * phase. The code here should be run after the per-crtc and per-plane 'check'
13989 * handlers to ensure that all derived state has been updated.
13991 static int calc_watermark_data(struct intel_atomic_state *state)
13993 struct drm_device *dev = state->base.dev;
13994 struct drm_i915_private *dev_priv = to_i915(dev);
13996 /* Is there platform-specific watermark information to calculate? */
13997 if (dev_priv->display.compute_global_watermarks)
13998 return dev_priv->display.compute_global_watermarks(state);
/*
 * If the computed new crtc state is "fuzzily" equal to the old one,
 * downgrade the full modeset to a fastset: clear mode_changed, set
 * update_pipe, and carry over link M/N and DRRS state from the old
 * state (see FIXME below).
 */
14003 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14004 struct intel_crtc_state *new_crtc_state)
14006 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14009 new_crtc_state->uapi.mode_changed = false;
14010 new_crtc_state->update_pipe = true;
14013 * If we're not doing the full modeset we want to
14014 * keep the current M/N values as they may be
14015 * sufficiently different to the computed values
14016 * to cause problems.
14018 * FIXME: should really copy more fuzzy state here
14020 new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
14021 new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
14022 new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
14023 new_crtc_state->has_drrs = old_crtc_state->has_drrs;
/*
 * Pull every plane of @crtc whose id is in the given mask into the
 * atomic state, so later passes can recompute their derived state.
 * Returns 0 or a negative error from intel_atomic_get_plane_state().
 */
14026 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14027 struct intel_crtc *crtc,
14030 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14031 struct intel_plane *plane;
14033 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14034 struct intel_plane_state *plane_state;
14036 if ((plane_ids_mask & BIT(plane->id)) == 0)
14039 plane_state = intel_atomic_get_plane_state(state, plane);
14040 if (IS_ERR(plane_state))
14041 return PTR_ERR(plane_state);
/*
 * Platforms whose per-plane minimum cdclk depends on how many planes
 * are active (the plane-ratio platforms listed in the helpers below).
 */
14047 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14049 /* See {hsw,vlv,ivb}_plane_ratio() */
14050 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14051 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14052 IS_IVYBRIDGE(dev_priv);
/*
 * Plane-level atomic checks: link NV12 Y/UV planes, run each plane's
 * check hook, pull in extra planes where the active-plane count affects
 * min cdclk, then recompute per-plane min cdclk. Sets *need_modeset if
 * any plane's min-cdclk change forces a full modeset.
 */
14055 static int intel_atomic_check_planes(struct intel_atomic_state *state,
14056 bool *need_modeset)
14058 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14059 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14060 struct intel_plane_state *plane_state;
14061 struct intel_plane *plane;
14062 struct intel_crtc *crtc;
14065 ret = icl_add_linked_planes(state);
14069 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
14070 ret = intel_plane_atomic_check(state, plane);
14072 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
14073 plane->base.base.id, plane->base.name);
14078 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14079 new_crtc_state, i) {
14080 u8 old_active_planes, new_active_planes;
14082 ret = icl_check_nv12_planes(new_crtc_state);
14087 * On some platforms the number of active planes affects
14088 * the planes' minimum cdclk calculation. Add such planes
14089 * to the state before we compute the minimum cdclk.
14091 if (!active_planes_affects_min_cdclk(dev_priv))
/* Cursor plane does not factor into the plane-ratio calculation. */
14094 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
14095 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
14097 if (hweight8(old_active_planes) == hweight8(new_active_planes))
14100 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
14106 * active_planes bitmask has been updated, and potentially
14107 * affected planes are part of the state. We can now
14108 * compute the minimum cdclk for each plane.
14110 for_each_new_intel_plane_in_state(state, plane, plane_state, i)
14111 *need_modeset |= intel_plane_calc_min_cdclk(state, plane);
/*
 * Run the per-crtc atomic check hook for each new crtc state, logging
 * and propagating the first failure.
 */
14116 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14118 struct intel_crtc_state *crtc_state;
14119 struct intel_crtc *crtc;
14122 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14123 int ret = intel_crtc_atomic_check(state, crtc);
14125 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
14126 crtc->base.base.id, crtc->base.name);
14135 * intel_atomic_check - validate state object
14137 * @_state: state to validate
/*
 * Top-level atomic .atomic_check entry point: propagate inherited-mode
 * flags, run the DRM helper modeset check, compute hw crtc state and
 * fastset downgrades, then plane/cdclk/crtc/watermark/bandwidth checks.
 */
14139 static int intel_atomic_check(struct drm_device *dev,
14140 struct drm_atomic_state *_state)
14142 struct drm_i915_private *dev_priv = to_i915(dev);
14143 struct intel_atomic_state *state = to_intel_atomic_state(_state);
14144 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14145 struct intel_crtc *crtc;
14147 bool any_ms = false;
14149 /* Catch I915_MODE_FLAG_INHERITED */
14150 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14151 new_crtc_state, i) {
14152 if (new_crtc_state->hw.mode.private_flags !=
14153 old_crtc_state->hw.mode.private_flags)
14154 new_crtc_state->uapi.mode_changed = true;
14157 ret = drm_atomic_helper_check_modeset(dev, &state->base);
14161 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14162 new_crtc_state, i) {
14163 if (!needs_modeset(new_crtc_state)) {
/* Non-modeset commits only sync the uapi -> hw state copy. */
14165 intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14170 if (!new_crtc_state->uapi.enable) {
14171 intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
14177 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14181 ret = intel_modeset_pipe_config(new_crtc_state);
/* May downgrade this crtc's modeset to a fastset. */
14185 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14187 if (needs_modeset(new_crtc_state))
14191 if (any_ms && !check_digital_port_conflicts(state)) {
14192 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
14197 ret = drm_dp_mst_atomic_check(&state->base);
14201 any_ms |= state->cdclk.force_min_cdclk_changed;
14203 ret = intel_atomic_check_planes(state, &any_ms);
14208 ret = intel_modeset_checks(state);
/* Non-modeset path still needs a coherent logical cdclk snapshot. */
14212 state->cdclk.logical = dev_priv->cdclk.logical;
14215 ret = intel_atomic_check_crtcs(state);
14219 intel_fbc_choose_crtc(dev_priv, state);
14220 ret = calc_watermark_data(state);
14224 ret = intel_bw_atomic_check(state);
/* Success path: dump the final config of each modeset/fastset crtc. */
14228 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14229 new_crtc_state, i) {
14230 if (!needs_modeset(new_crtc_state) &&
14231 !new_crtc_state->update_pipe)
14234 intel_dump_pipe_config(new_crtc_state, state,
14235 needs_modeset(new_crtc_state) ?
14236 "[modeset]" : "[fastset]");
/* Error path: -EDEADLK must be returned untouched for backoff. */
14242 if (ret == -EDEADLK)
14246 * FIXME would probably be nice to know which crtc specifically
14247 * caused the failure, in cases where we can pinpoint it.
14249 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14251 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
/*
 * Thin wrapper: pin/prepare all framebuffers for the commit via the DRM
 * plane-prepare helper.
 */
14256 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
14258 return drm_atomic_helper_prepare_planes(state->base.dev,
/*
 * Return the current vblank counter for @crtc. Falls back to the
 * software-accurate count when the hardware counter is unusable
 * (max_vblank_count == 0).
 */
14262 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14264 struct drm_device *dev = crtc->base.dev;
14265 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14267 if (!vblank->max_vblank_count)
14268 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14270 return crtc->base.funcs->get_vblank_counter(&crtc->base);
/*
 * Enable FIFO underrun reporting for this crtc's CPU pipe (gen3+) and,
 * for PCH-attached encoders, the corresponding PCH transcoder.
 */
14273 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14274 struct intel_crtc_state *crtc_state)
14276 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Gen2 is excluded; reason not visible in this excerpt. */
14278 if (!IS_GEN(dev_priv, 2))
14279 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14281 if (crtc_state->has_pch_encoder) {
14282 enum pipe pch_transcoder =
14283 intel_crtc_pch_transcoder(crtc);
14285 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
/*
 * Apply the pipe-level updates a fastset needs: pipe source size,
 * panel fitter enable/disable per platform generation, and the ICL+
 * pipe chicken register.
 */
14289 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
14290 const struct intel_crtc_state *new_crtc_state)
14292 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
14293 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14296 * Update pipe size and adjust fitter if needed: the reason for this is
14297 * that in compute_mode_changes we check the native mode (not the pfit
14298 * mode) to see if we can flip rather than do a full mode set. In the
14299 * fastboot case, we'll flip, but if we don't update the pipesrc and
14300 * pfit state, we'll end up with a big fb scanned out into the wrong
14303 intel_set_pipe_src_size(new_crtc_state);
14305 /* on skylake this is done by detaching scalers */
14306 if (INTEL_GEN(dev_priv) >= 9) {
14307 skl_detach_scalers(new_crtc_state);
14309 if (new_crtc_state->pch_pfit.enabled)
14310 skylake_pfit_enable(new_crtc_state);
14311 } else if (HAS_PCH_SPLIT(dev_priv)) {
14312 if (new_crtc_state->pch_pfit.enabled)
14313 ironlake_pfit_enable(new_crtc_state);
14314 else if (old_crtc_state->pch_pfit.enabled)
14315 ironlake_pfit_disable(old_crtc_state);
14318 if (INTEL_GEN(dev_priv) >= 11)
14319 icl_set_pipe_chicken(crtc);
/*
 * Commit pipe-level configuration during the vblank-evaded window:
 * color management, scaler detach (gen9+), PIPEMISC (BDW+), fastset
 * pipe updates, and the atomic watermark update.
 */
14322 static void commit_pipe_config(struct intel_atomic_state *state,
14323 struct intel_crtc_state *old_crtc_state,
14324 struct intel_crtc_state *new_crtc_state)
14326 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14327 bool modeset = needs_modeset(new_crtc_state);
14330 * During modesets pipe configuration was programmed as the
14331 * CRTC was enabled.
14334 if (new_crtc_state->uapi.color_mgmt_changed ||
14335 new_crtc_state->update_pipe)
14336 intel_color_commit(new_crtc_state);
14338 if (INTEL_GEN(dev_priv) >= 9)
14339 skl_detach_scalers(new_crtc_state);
14341 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
14342 bdw_set_pipemisc(new_crtc_state);
14344 if (new_crtc_state->update_pipe)
14345 intel_pipe_fastset(old_crtc_state, new_crtc_state);
14348 if (dev_priv->display.atomic_update_watermarks)
14349 dev_priv->display.atomic_update_watermarks(state,
/*
 * Commit one crtc: enable it if this is a modeset, otherwise run the
 * fastset path (pre-plane update, FBC, vblank-evaded pipe config and
 * plane updates), then arm FIFO underruns on a BIOS-inherited first
 * fastset.
 */
14353 static void intel_update_crtc(struct intel_crtc *crtc,
14354 struct intel_atomic_state *state,
14355 struct intel_crtc_state *old_crtc_state,
14356 struct intel_crtc_state *new_crtc_state)
14358 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14359 bool modeset = needs_modeset(new_crtc_state);
14360 struct intel_plane_state *new_plane_state =
14361 intel_atomic_get_new_plane_state(state,
14362 to_intel_plane(crtc->base.primary));
14365 intel_crtc_update_active_timings(new_crtc_state);
14367 dev_priv->display.crtc_enable(new_crtc_state, state);
14369 /* vblanks work again, re-enable pipe CRC. */
14370 intel_crtc_enable_pipe_crc(crtc);
/* Preload LUTs before vblank evasion to keep the update atomic. */
14372 if (new_crtc_state->preload_luts &&
14373 (new_crtc_state->uapi.color_mgmt_changed ||
14374 new_crtc_state->update_pipe))
14375 intel_color_load_luts(new_crtc_state);
14377 intel_pre_plane_update(old_crtc_state, new_crtc_state);
14379 if (new_crtc_state->update_pipe)
14380 intel_encoders_update_pipe(state, crtc);
14383 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
14384 intel_fbc_disable(crtc);
14385 else if (new_plane_state)
14386 intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
14388 /* Perform vblank evasion around commit operation */
14389 intel_pipe_update_start(new_crtc_state);
14391 commit_pipe_config(state, old_crtc_state, new_crtc_state);
14393 if (INTEL_GEN(dev_priv) >= 9)
14394 skl_update_planes_on_crtc(state, crtc);
14396 i9xx_update_planes_on_crtc(state, crtc);
14398 intel_pipe_update_end(new_crtc_state);
14401 * We usually enable FIFO underrun interrupts as part of the
14402 * CRTC enable sequence during modesets. But when we inherit a
14403 * valid pipe configuration from the BIOS we need to take care
14404 * of enabling them on the CRTC's first fastset.
14406 if (new_crtc_state->update_pipe && !modeset &&
14407 old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED
14408 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
/*
 * Resolve the transcoder port-sync slave crtc of a master crtc state.
 * Expects exactly one bit set in sync_mode_slaves_mask.
 *
 * NOTE(review): the slave transcoder id is cast directly to an enum
 * pipe — this relies on transcoder/pipe numbering lining up for the
 * transcoders usable here; confirm against platform docs.
 */
14411 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
14413 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
14414 enum transcoder slave_transcoder;
14416 WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
14418 slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
14419 return intel_get_crtc_for_pipe(dev_priv,
14420 (enum pipe)slave_transcoder);
/*
 * Disable sequence for a crtc being turned off or fully modeset:
 * planes, pipe CRC, the crtc itself, FBC and its shared DPLL; then
 * re-check FIFO underruns and program initial watermarks for the
 * now-inactive pipe on non-GMCH platforms.
 */
14423 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
14424 struct intel_crtc_state *old_crtc_state,
14425 struct intel_crtc_state *new_crtc_state,
14426 struct intel_crtc *crtc)
14428 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14430 intel_crtc_disable_planes(state, crtc);
14433 * We need to disable pipe CRC before disabling the pipe,
14434 * or we race against vblank off.
14436 intel_crtc_disable_pipe_crc(crtc);
14438 dev_priv->display.crtc_disable(old_crtc_state, state);
14439 crtc->active = false;
14440 intel_fbc_disable(crtc);
14441 intel_disable_shared_dpll(old_crtc_state);
14444 * Underruns don't always raise interrupts,
14445 * so check manually.
14447 intel_check_cpu_fifo_underruns(dev_priv);
14448 intel_check_pch_fifo_underruns(dev_priv);
14450 /* FIXME unify this for all platforms */
14451 if (!new_crtc_state->hw.active &&
14452 !HAS_GMCH(dev_priv) &&
14453 dev_priv->display.initial_watermarks)
14454 dev_priv->display.initial_watermarks(state,
/*
 * Modeset-disable for a transcoder port-sync pair: the slave crtc must
 * be torn down before its master (slave vblanks are gated by the
 * master's), then the master follows.
 */
14458 static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
14459 struct intel_crtc *crtc,
14460 struct intel_crtc_state *old_crtc_state,
14461 struct intel_crtc_state *new_crtc_state)
14463 struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
14464 struct intel_crtc_state *new_slave_crtc_state =
14465 intel_atomic_get_new_crtc_state(state, slave_crtc);
14466 struct intel_crtc_state *old_slave_crtc_state =
14467 intel_atomic_get_old_crtc_state(state, slave_crtc);
14469 WARN_ON(!slave_crtc || !new_slave_crtc_state ||
14470 !old_slave_crtc_state);
14472 /* Disable Slave first */
14473 intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
14474 if (old_slave_crtc_state->hw.active)
14475 intel_old_crtc_state_disables(state,
14476 old_slave_crtc_state,
14477 new_slave_crtc_state,
14480 /* Disable Master */
14481 intel_pre_plane_update(old_crtc_state, new_crtc_state);
14482 if (old_crtc_state->hw.active)
14483 intel_old_crtc_state_disables(state,
/*
 * Disable all crtcs that undergo a modeset, honoring master/slave
 * ordering constraints (see comments below for why the iteration is
 * reversed and why port-sync masters dispatch to the pair helper).
 */
14489 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
14491 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
14492 struct intel_crtc *crtc;
14496 * Disable CRTC/pipes in reverse order because some features(MST in
14497 * TGL+) requires master and slave relationship between pipes, so it
14498 * should always pick the lowest pipe as master as it will be enabled
14499 * first and disable in the reverse order so the master will be the
14500 * last one to be disabled.
14502 for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
14503 new_crtc_state, i) {
14504 if (!needs_modeset(new_crtc_state))
14507 /* In case of Transcoder port Sync master slave CRTCs can be
14508 * assigned in any order and we need to make sure that
14509 * slave CRTCs are disabled first and then master CRTC since
14510 * Slave vblanks are masked till Master Vblanks.
14512 if (is_trans_port_sync_mode(new_crtc_state)) {
14513 if (is_trans_port_sync_master(new_crtc_state))
14514 intel_trans_port_sync_modeset_disables(state,
/* Ordinary (non port-sync) crtc: disable directly. */
14521 intel_pre_plane_update(old_crtc_state, new_crtc_state);
14523 if (old_crtc_state->hw.active)
14524 intel_old_crtc_state_disables(state,
/*
 * Simple (non-skl) enable path: update every crtc that ends up active,
 * in state order.
 */
14532 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14534 struct intel_crtc *crtc;
14535 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14538 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14539 if (!new_crtc_state->hw.active)
14542 intel_update_crtc(crtc, state, old_crtc_state,
/*
 * Minimal enable sequence used for each half of a port-sync pair:
 * timings, crtc enable hook, pipe CRC. Link training is finalized
 * separately (DP_TP_CTL left Idle by the caller's sequence).
 */
14547 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
14548 struct intel_atomic_state *state,
14549 struct intel_crtc_state *new_crtc_state)
14551 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14553 intel_crtc_update_active_timings(new_crtc_state);
14554 dev_priv->display.crtc_enable(new_crtc_state, state);
14555 intel_crtc_enable_pipe_crc(crtc);
/*
 * Find the connector driven by @crtc in this commit and stop DP link
 * training on it, i.e. move DP_TP_CTL from Idle to Normal.
 */
14558 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
14559 struct intel_atomic_state *state)
14561 struct drm_connector *uninitialized_var(conn);
14562 struct drm_connector_state *conn_state;
14563 struct intel_dp *intel_dp;
/* Loop stops at the first connector bound to this crtc (break elided). */
14566 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
14567 if (conn_state->crtc == &crtc->base)
14570 intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
14571 intel_dp_stop_link_train(intel_dp);
/*
 * Post-enable half of the port-sync update: FBC, vblank-evaded pipe
 * config and plane commit, then FIFO underrun arming on a
 * BIOS-inherited first fastset. Mirrors the tail of intel_update_crtc().
 */
14574 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
14575 struct intel_atomic_state *state)
14577 struct intel_crtc_state *new_crtc_state =
14578 intel_atomic_get_new_crtc_state(state, crtc);
14579 struct intel_crtc_state *old_crtc_state =
14580 intel_atomic_get_old_crtc_state(state, crtc);
14581 struct intel_plane_state *new_plane_state =
14582 intel_atomic_get_new_plane_state(state,
14583 to_intel_plane(crtc->base.primary));
14584 bool modeset = needs_modeset(new_crtc_state);
14586 if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
14587 intel_fbc_disable(crtc);
14588 else if (new_plane_state)
14589 intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
14591 /* Perform vblank evasion around commit operation */
14592 intel_pipe_update_start(new_crtc_state);
14593 commit_pipe_config(state, old_crtc_state, new_crtc_state);
/* Port sync is TGL+/skl-class hardware, hence the skl plane path. */
14594 skl_update_planes_on_crtc(state, crtc);
14595 intel_pipe_update_end(new_crtc_state);
14598 * We usually enable FIFO underrun interrupts as part of the
14599 * CRTC enable sequence during modesets. But when we inherit a
14600 * valid pipe configuration from the BIOS we need to take care
14601 * of enabling them on the CRTC's first fastset.
14603 if (new_crtc_state->update_pipe && !modeset &&
14604 old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED
14605 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
/*
 * Enable a transcoder port-sync master/slave pair in the required
 * order: enable slave then master with DP_TP_CTL left Idle, switch the
 * slave's DP_TP_CTL to Normal, wait briefly, switch the master's, then
 * run post-enable updates on both.
 */
14608 static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
14609 struct intel_atomic_state *state,
14610 struct intel_crtc_state *old_crtc_state,
14611 struct intel_crtc_state *new_crtc_state)
14613 struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
14614 struct intel_crtc_state *new_slave_crtc_state =
14615 intel_atomic_get_new_crtc_state(state, slave_crtc);
14616 struct intel_crtc_state *old_slave_crtc_state =
14617 intel_atomic_get_old_crtc_state(state, slave_crtc);
14619 WARN_ON(!slave_crtc || !new_slave_crtc_state ||
14620 !old_slave_crtc_state);
14622 DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
14623 crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
14624 slave_crtc->base.name);
14626 /* Enable seq for slave with DP_TP_CTL left Idle until the
14629 intel_crtc_enable_trans_port_sync(slave_crtc,
14631 new_slave_crtc_state);
14633 /* Enable seq for master with DP_TP_CTL left Idle */
14634 intel_crtc_enable_trans_port_sync(crtc,
14638 /* Set Slave's DP_TP_CTL to Normal */
14639 intel_set_dp_tp_ctl_normal(slave_crtc,
14642 /* Set Master's DP_TP_CTL To Normal */
14643 usleep_range(200, 400);
14644 intel_set_dp_tp_ctl_normal(crtc,
14647 /* Now do the post crtc enable for all master and slaves */
14648 intel_post_crtc_enable_updates(slave_crtc,
14650 intel_post_crtc_enable_updates(crtc,
/*
 * SKL+ enable path: iterate until every active pipe has been updated,
 * only updating a pipe once its new DDB allocation no longer overlaps
 * any other pipe's still-current allocation, with vblank waits in
 * between so allocations never collide. Also turns the second DBuf
 * slice on before / off after the updates as required.
 */
14654 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
14656 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14657 struct intel_crtc *crtc;
14658 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14659 unsigned int updated = 0;
14662 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
14663 u8 required_slices = state->wm_results.ddb.enabled_slices;
14664 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
14666 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
14667 /* ignore allocations for crtc's that have been turned off. */
14668 if (new_crtc_state->hw.active)
14669 entries[i] = old_crtc_state->wm.skl.ddb;
14671 /* If 2nd DBuf slice required, enable it here */
14672 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
14673 icl_dbuf_slices_update(dev_priv, required_slices);
14676 * Whenever the number of active pipes changes, we need to make sure we
14677 * update the pipes in the right order so that their ddb allocations
14678 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
14679 * cause pipe underruns and other bad stuff.
14684 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14685 enum pipe pipe = crtc->pipe;
14686 bool vbl_wait = false;
14687 bool modeset = needs_modeset(new_crtc_state);
/* Skip pipes already handled this pass, or ending up inactive. */
14689 if (updated & BIT(crtc->pipe) || !new_crtc_state->hw.active)
/* Defer this pipe while its new DDB overlaps someone else's. */
14692 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
14694 INTEL_NUM_PIPES(dev_priv), i))
14697 updated |= BIT(pipe);
14698 entries[i] = new_crtc_state->wm.skl.ddb;
14701 * If this is an already active pipe, it's DDB changed,
14702 * and this isn't the last pipe that needs updating
14703 * then we need to wait for a vblank to pass for the
14704 * new ddb allocation to take effect.
14706 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
14707 &old_crtc_state->wm.skl.ddb) &&
14709 state->wm_results.dirty_pipes != updated)
14712 if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
14713 if (is_trans_port_sync_master(new_crtc_state))
14714 intel_update_trans_port_sync_crtcs(crtc,
14721 intel_update_crtc(crtc, state, old_crtc_state,
14726 intel_wait_for_vblank(dev_priv, pipe);
14730 } while (progress);
14732 /* If 2nd DBuf slice is no more required disable it */
14733 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
14734 icl_dbuf_slices_update(dev_priv, required_slices);
/*
 * Drain the lock-free list of atomic states queued for freeing and drop
 * the final reference on each.
 */
14737 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
14739 struct intel_atomic_state *state, *next;
14740 struct llist_node *freed;
14742 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
14743 llist_for_each_entry_safe(state, next, freed, freed)
14744 drm_atomic_state_put(&state->base);
/* Workqueue entry point deferring state freeing out of atomic context. */
14747 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
14749 struct drm_i915_private *dev_priv =
14750 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
14752 intel_atomic_helper_free_state(dev_priv);
/*
 * Wait (uninterruptibly) until either the commit's sw fence signals or
 * a modeset-affecting GPU reset begins, by sleeping on both waitqueues
 * at once and waking on whichever fires first.
 */
14755 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
14757 struct wait_queue_entry wait_fence, wait_reset;
14758 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
14760 init_wait_entry(&wait_fence, 0);
14761 init_wait_entry(&wait_reset, 0);
14763 prepare_to_wait(&intel_state->commit_ready.wait,
14764 &wait_fence, TASK_UNINTERRUPTIBLE);
14765 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
14766 I915_RESET_MODESET),
14767 &wait_reset, TASK_UNINTERRUPTIBLE);
/* Exit condition checked each wakeup; schedule() call is on elided lines. */
14770 if (i915_sw_fence_done(&intel_state->commit_ready) ||
14771 test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
14776 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
14777 finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
14778 I915_RESET_MODESET),
/*
 * Deferred post-commit cleanup: unpin planes, signal commit completion,
 * drop the state reference, then free any states queued on the helper
 * free list.
 */
14782 static void intel_atomic_cleanup_work(struct work_struct *work)
14784 struct drm_atomic_state *state =
14785 container_of(work, struct drm_atomic_state, commit_work);
14786 struct drm_i915_private *i915 = to_i915(state->dev);
14788 drm_atomic_helper_cleanup_planes(&i915->drm, state);
14789 drm_atomic_helper_commit_cleanup_done(state);
14790 drm_atomic_state_put(state);
14792 intel_atomic_helper_free_state(i915);
14795 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
14797 struct drm_device *dev = state->base.dev;
14798 struct drm_i915_private *dev_priv = to_i915(dev);
14799 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
14800 struct intel_crtc *crtc;
14801 u64 put_domains[I915_MAX_PIPES] = {};
14802 intel_wakeref_t wakeref = 0;
14805 intel_atomic_commit_fence_wait(state);
14807 drm_atomic_helper_wait_for_dependencies(&state->base);
14809 if (state->modeset)
14810 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
14812 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14813 new_crtc_state, i) {
14814 if (needs_modeset(new_crtc_state) ||
14815 new_crtc_state->update_pipe) {
14817 put_domains[crtc->pipe] =
14818 modeset_get_crtc_power_domains(new_crtc_state);
14822 intel_commit_modeset_disables(state);
14824 /* FIXME: Eventually get rid of our crtc->config pointer */
14825 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
14826 crtc->config = new_crtc_state;
14828 if (state->modeset) {
14829 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
14831 intel_set_cdclk_pre_plane_update(dev_priv,
14832 &state->cdclk.actual,
14833 &dev_priv->cdclk.actual,
14834 state->cdclk.pipe);
14837 * SKL workaround: bspec recommends we disable the SAGV when we
14838 * have more then one pipe enabled
14840 if (!intel_can_enable_sagv(state))
14841 intel_disable_sagv(dev_priv);
14843 intel_modeset_verify_disabled(dev_priv, state);
14846 /* Complete the events for pipes that have now been disabled */
14847 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14848 bool modeset = needs_modeset(new_crtc_state);
14850 /* Complete events for now disable pipes here. */
14851 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
14852 spin_lock_irq(&dev->event_lock);
14853 drm_crtc_send_vblank_event(&crtc->base,
14854 new_crtc_state->uapi.event);
14855 spin_unlock_irq(&dev->event_lock);
14857 new_crtc_state->uapi.event = NULL;
14861 if (state->modeset)
14862 intel_encoders_update_prepare(state);
14864 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
14865 dev_priv->display.commit_modeset_enables(state);
14867 if (state->modeset) {
14868 intel_encoders_update_complete(state);
14870 intel_set_cdclk_post_plane_update(dev_priv,
14871 &state->cdclk.actual,
14872 &dev_priv->cdclk.actual,
14873 state->cdclk.pipe);
14876 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
14877 * already, but still need the state for the delayed optimization. To
14879 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
14880 * - schedule that vblank worker _before_ calling hw_done
14881 * - at the start of commit_tail, cancel it _synchrously
14882 * - switch over to the vblank wait helper in the core after that since
14883 * we don't need out special handling any more.
14885 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
14887 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14888 if (new_crtc_state->hw.active &&
14889 !needs_modeset(new_crtc_state) &&
14890 !new_crtc_state->preload_luts &&
14891 (new_crtc_state->uapi.color_mgmt_changed ||
14892 new_crtc_state->update_pipe))
14893 intel_color_load_luts(new_crtc_state);
14897 * Now that the vblank has passed, we can go ahead and program the
14898 * optimal watermarks on platforms that need two-step watermark
14901 * TODO: Move this (and other cleanup) to an async worker eventually.
14903 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14904 if (dev_priv->display.optimize_watermarks)
14905 dev_priv->display.optimize_watermarks(state,
14909 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14910 intel_post_plane_update(old_crtc_state);
14912 if (put_domains[i])
14913 modeset_put_power_domains(dev_priv, put_domains[i]);
14915 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
14918 if (state->modeset)
14919 intel_verify_planes(state);
14921 if (state->modeset && intel_can_enable_sagv(state))
14922 intel_enable_sagv(dev_priv);
14924 drm_atomic_helper_commit_hw_done(&state->base);
14926 if (state->modeset) {
14927 /* As one of the primary mmio accessors, KMS has a high
14928 * likelihood of triggering bugs in unclaimed access. After we
14929 * finish modesetting, see if an error has been flagged, and if
14930 * so enable debugging for the next modeset - and hope we catch
14933 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
14934 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
14936 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14939 * Defer the cleanup of the old state to a separate worker to not
14940 * impede the current task (userspace for blocking modesets) that
14941 * are executed inline. For out-of-line asynchronous modesets/flips,
14942 * deferring to a new worker seems overkill, but we would place a
14943 * schedule point (cond_resched()) here anyway to keep latencies
14946 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
14947 queue_work(system_highpri_wq, &state->base.commit_work);
14950 static void intel_atomic_commit_work(struct work_struct *work)
14952 struct intel_atomic_state *state =
14953 container_of(work, struct intel_atomic_state, base.commit_work);
14955 intel_atomic_commit_tail(state);
14958 static int __i915_sw_fence_call
14959 intel_atomic_commit_ready(struct i915_sw_fence *fence,
14960 enum i915_sw_fence_notify notify)
14962 struct intel_atomic_state *state =
14963 container_of(fence, struct intel_atomic_state, commit_ready);
14966 case FENCE_COMPLETE:
14967 /* we do blocking waits in the worker, nothing to do here */
14971 struct intel_atomic_helper *helper =
14972 &to_i915(state->base.dev)->atomic_helper;
14974 if (llist_add(&state->freed, &helper->free_list))
14975 schedule_work(&helper->free_work);
14980 return NOTIFY_DONE;
14983 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
14985 struct intel_plane_state *old_plane_state, *new_plane_state;
14986 struct intel_plane *plane;
14989 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
14990 new_plane_state, i)
14991 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
14992 to_intel_frontbuffer(new_plane_state->hw.fb),
14993 plane->frontbuffer_bit);
14996 static void assert_global_state_locked(struct drm_i915_private *dev_priv)
14998 struct intel_crtc *crtc;
15000 for_each_intel_crtc(&dev_priv->drm, crtc)
15001 drm_modeset_lock_assert_held(&crtc->base.mutex);
15004 static int intel_atomic_commit(struct drm_device *dev,
15005 struct drm_atomic_state *_state,
15008 struct intel_atomic_state *state = to_intel_atomic_state(_state);
15009 struct drm_i915_private *dev_priv = to_i915(dev);
15012 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
15014 drm_atomic_state_get(&state->base);
15015 i915_sw_fence_init(&state->commit_ready,
15016 intel_atomic_commit_ready);
15019 * The intel_legacy_cursor_update() fast path takes care
15020 * of avoiding the vblank waits for simple cursor
15021 * movement and flips. For cursor on/off and size changes,
15022 * we want to perform the vblank waits so that watermark
15023 * updates happen during the correct frames. Gen9+ have
15024 * double buffered watermarks and so shouldn't need this.
15026 * Unset state->legacy_cursor_update before the call to
15027 * drm_atomic_helper_setup_commit() because otherwise
15028 * drm_atomic_helper_wait_for_flip_done() is a noop and
15029 * we get FIFO underruns because we didn't wait
15032 * FIXME doing watermarks and fb cleanup from a vblank worker
15033 * (assuming we had any) would solve these problems.
15035 if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
15036 struct intel_crtc_state *new_crtc_state;
15037 struct intel_crtc *crtc;
15040 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
15041 if (new_crtc_state->wm.need_postvbl_update ||
15042 new_crtc_state->update_wm_post)
15043 state->base.legacy_cursor_update = false;
15046 ret = intel_atomic_prepare_commit(state);
15048 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
15049 i915_sw_fence_commit(&state->commit_ready);
15050 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
15054 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
15056 ret = drm_atomic_helper_swap_state(&state->base, true);
15059 i915_sw_fence_commit(&state->commit_ready);
15061 drm_atomic_helper_cleanup_planes(dev, &state->base);
15062 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
15065 dev_priv->wm.distrust_bios_wm = false;
15066 intel_shared_dpll_swap_state(state);
15067 intel_atomic_track_fbs(state);
15069 if (state->global_state_changed) {
15070 assert_global_state_locked(dev_priv);
15072 memcpy(dev_priv->min_cdclk, state->min_cdclk,
15073 sizeof(state->min_cdclk));
15074 memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
15075 sizeof(state->min_voltage_level));
15076 dev_priv->active_pipes = state->active_pipes;
15077 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
15079 intel_cdclk_swap_state(state);
15082 drm_atomic_state_get(&state->base);
15083 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
15085 i915_sw_fence_commit(&state->commit_ready);
15086 if (nonblock && state->modeset) {
15087 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
15088 } else if (nonblock) {
15089 queue_work(dev_priv->flip_wq, &state->base.commit_work);
15091 if (state->modeset)
15092 flush_workqueue(dev_priv->modeset_wq);
15093 intel_atomic_commit_tail(state);
15099 struct wait_rps_boost {
15100 struct wait_queue_entry wait;
15102 struct drm_crtc *crtc;
15103 struct i915_request *request;
15106 static int do_rps_boost(struct wait_queue_entry *_wait,
15107 unsigned mode, int sync, void *key)
15109 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
15110 struct i915_request *rq = wait->request;
15113 * If we missed the vblank, but the request is already running it
15114 * is reasonable to assume that it will complete before the next
15115 * vblank without our intervention, so leave RPS alone.
15117 if (!i915_request_started(rq))
15118 intel_rps_boost(rq);
15119 i915_request_put(rq);
15121 drm_crtc_vblank_put(wait->crtc);
15123 list_del(&wait->wait.entry);
15128 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15129 struct dma_fence *fence)
15131 struct wait_rps_boost *wait;
15133 if (!dma_fence_is_i915(fence))
15136 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15139 if (drm_crtc_vblank_get(crtc))
15142 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15144 drm_crtc_vblank_put(crtc);
15148 wait->request = to_request(dma_fence_get(fence));
15151 wait->wait.func = do_rps_boost;
15152 wait->wait.flags = 0;
15154 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15157 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15159 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15160 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15161 struct drm_framebuffer *fb = plane_state->hw.fb;
15162 struct i915_vma *vma;
15164 if (plane->id == PLANE_CURSOR &&
15165 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15166 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15167 const int align = intel_cursor_alignment(dev_priv);
15170 err = i915_gem_object_attach_phys(obj, align);
15175 vma = intel_pin_and_fence_fb_obj(fb,
15176 &plane_state->view,
15177 intel_plane_uses_fence(plane_state),
15178 &plane_state->flags);
15180 return PTR_ERR(vma);
15182 plane_state->vma = vma;
15187 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15189 struct i915_vma *vma;
15191 vma = fetch_and_zero(&old_plane_state->vma);
15193 intel_unpin_fb_vma(vma, old_plane_state->flags);
15196 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
15198 struct i915_sched_attr attr = {
15199 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
15202 i915_gem_object_wait_priority(obj, 0, &attr);
15206 * intel_prepare_plane_fb - Prepare fb for usage on plane
15207 * @plane: drm plane to prepare for
15208 * @_new_plane_state: the plane state being prepared
15210 * Prepares a framebuffer for usage on a display plane. Generally this
15211 * involves pinning the underlying object and updating the frontbuffer tracking
15212 * bits. Some older platforms need special physical address handling for
15215 * Returns 0 on success, negative error code on failure.
15218 intel_prepare_plane_fb(struct drm_plane *plane,
15219 struct drm_plane_state *_new_plane_state)
15221 struct intel_plane_state *new_plane_state =
15222 to_intel_plane_state(_new_plane_state);
15223 struct intel_atomic_state *intel_state =
15224 to_intel_atomic_state(new_plane_state->uapi.state);
15225 struct drm_i915_private *dev_priv = to_i915(plane->dev);
15226 struct drm_framebuffer *fb = new_plane_state->hw.fb;
15227 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15228 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
15232 struct intel_crtc_state *crtc_state =
15233 intel_atomic_get_new_crtc_state(intel_state,
15234 to_intel_crtc(plane->state->crtc));
15236 /* Big Hammer, we also need to ensure that any pending
15237 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15238 * current scanout is retired before unpinning the old
15239 * framebuffer. Note that we rely on userspace rendering
15240 * into the buffer attached to the pipe they are waiting
15241 * on. If not, userspace generates a GPU hang with IPEHR
15242 * point to the MI_WAIT_FOR_EVENT.
15244 * This should only fail upon a hung GPU, in which case we
15245 * can safely continue.
15247 if (needs_modeset(crtc_state)) {
15248 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15249 old_obj->base.resv, NULL,
15257 if (new_plane_state->uapi.fence) { /* explicit fencing */
15258 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
15259 new_plane_state->uapi.fence,
15260 I915_FENCE_TIMEOUT,
15269 ret = i915_gem_object_pin_pages(obj);
15273 ret = intel_plane_pin_fb(new_plane_state);
15275 i915_gem_object_unpin_pages(obj);
15279 fb_obj_bump_render_priority(obj);
15280 intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
15282 if (!new_plane_state->uapi.fence) { /* implicit fencing */
15283 struct dma_fence *fence;
15285 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15286 obj->base.resv, NULL,
15287 false, I915_FENCE_TIMEOUT,
15292 fence = dma_resv_get_excl_rcu(obj->base.resv);
15294 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15296 dma_fence_put(fence);
15299 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15300 new_plane_state->uapi.fence);
15304 * We declare pageflips to be interactive and so merit a small bias
15305 * towards upclocking to deliver the frame on time. By only changing
15306 * the RPS thresholds to sample more regularly and aim for higher
15307 * clocks we can hopefully deliver low power workloads (like kodi)
15308 * that are not quite steady state without resorting to forcing
15309 * maximum clocks following a vblank miss (see do_rps_boost()).
15311 if (!intel_state->rps_interactive) {
15312 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15313 intel_state->rps_interactive = true;
15320 * intel_cleanup_plane_fb - Cleans up an fb after plane use
15321 * @plane: drm plane to clean up for
15322 * @_old_plane_state: the state from the previous modeset
15324 * Cleans up a framebuffer that has just been removed from a plane.
15327 intel_cleanup_plane_fb(struct drm_plane *plane,
15328 struct drm_plane_state *_old_plane_state)
15330 struct intel_plane_state *old_plane_state =
15331 to_intel_plane_state(_old_plane_state);
15332 struct intel_atomic_state *intel_state =
15333 to_intel_atomic_state(old_plane_state->uapi.state);
15334 struct drm_i915_private *dev_priv = to_i915(plane->dev);
15336 if (intel_state->rps_interactive) {
15337 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15338 intel_state->rps_interactive = false;
15341 /* Should only be called after a successful intel_prepare_plane_fb()! */
15342 intel_plane_unpin_fb(old_plane_state);
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
15358 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15359 u32 format, u64 modifier)
15361 switch (modifier) {
15362 case DRM_FORMAT_MOD_LINEAR:
15363 case I915_FORMAT_MOD_X_TILED:
15370 case DRM_FORMAT_C8:
15371 case DRM_FORMAT_RGB565:
15372 case DRM_FORMAT_XRGB1555:
15373 case DRM_FORMAT_XRGB8888:
15374 return modifier == DRM_FORMAT_MOD_LINEAR ||
15375 modifier == I915_FORMAT_MOD_X_TILED;
15381 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15382 u32 format, u64 modifier)
15384 switch (modifier) {
15385 case DRM_FORMAT_MOD_LINEAR:
15386 case I915_FORMAT_MOD_X_TILED:
15393 case DRM_FORMAT_C8:
15394 case DRM_FORMAT_RGB565:
15395 case DRM_FORMAT_XRGB8888:
15396 case DRM_FORMAT_XBGR8888:
15397 case DRM_FORMAT_ARGB8888:
15398 case DRM_FORMAT_ABGR8888:
15399 case DRM_FORMAT_XRGB2101010:
15400 case DRM_FORMAT_XBGR2101010:
15401 case DRM_FORMAT_ARGB2101010:
15402 case DRM_FORMAT_ABGR2101010:
15403 case DRM_FORMAT_XBGR16161616F:
15404 return modifier == DRM_FORMAT_MOD_LINEAR ||
15405 modifier == I915_FORMAT_MOD_X_TILED;
15411 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
15412 u32 format, u64 modifier)
15414 return modifier == DRM_FORMAT_MOD_LINEAR &&
15415 format == DRM_FORMAT_ARGB8888;
15418 static const struct drm_plane_funcs i965_plane_funcs = {
15419 .update_plane = drm_atomic_helper_update_plane,
15420 .disable_plane = drm_atomic_helper_disable_plane,
15421 .destroy = intel_plane_destroy,
15422 .atomic_duplicate_state = intel_plane_duplicate_state,
15423 .atomic_destroy_state = intel_plane_destroy_state,
15424 .format_mod_supported = i965_plane_format_mod_supported,
15427 static const struct drm_plane_funcs i8xx_plane_funcs = {
15428 .update_plane = drm_atomic_helper_update_plane,
15429 .disable_plane = drm_atomic_helper_disable_plane,
15430 .destroy = intel_plane_destroy,
15431 .atomic_duplicate_state = intel_plane_duplicate_state,
15432 .atomic_destroy_state = intel_plane_destroy_state,
15433 .format_mod_supported = i8xx_plane_format_mod_supported,
15437 intel_legacy_cursor_update(struct drm_plane *_plane,
15438 struct drm_crtc *_crtc,
15439 struct drm_framebuffer *fb,
15440 int crtc_x, int crtc_y,
15441 unsigned int crtc_w, unsigned int crtc_h,
15442 u32 src_x, u32 src_y,
15443 u32 src_w, u32 src_h,
15444 struct drm_modeset_acquire_ctx *ctx)
15446 struct intel_plane *plane = to_intel_plane(_plane);
15447 struct intel_crtc *crtc = to_intel_crtc(_crtc);
15448 struct intel_plane_state *old_plane_state =
15449 to_intel_plane_state(plane->base.state);
15450 struct intel_plane_state *new_plane_state;
15451 struct intel_crtc_state *crtc_state =
15452 to_intel_crtc_state(crtc->base.state);
15453 struct intel_crtc_state *new_crtc_state;
15457 * When crtc is inactive or there is a modeset pending,
15458 * wait for it to complete in the slowpath
15460 if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
15461 crtc_state->update_pipe)
15465 * Don't do an async update if there is an outstanding commit modifying
15466 * the plane. This prevents our async update's changes from getting
15467 * overridden by a previous synchronous update's state.
15469 if (old_plane_state->uapi.commit &&
15470 !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
15474 * If any parameters change that may affect watermarks,
15475 * take the slowpath. Only changing fb or position should be
15478 if (old_plane_state->uapi.crtc != &crtc->base ||
15479 old_plane_state->uapi.src_w != src_w ||
15480 old_plane_state->uapi.src_h != src_h ||
15481 old_plane_state->uapi.crtc_w != crtc_w ||
15482 old_plane_state->uapi.crtc_h != crtc_h ||
15483 !old_plane_state->uapi.fb != !fb)
15486 new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
15487 if (!new_plane_state)
15490 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
15491 if (!new_crtc_state) {
15496 drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
15498 new_plane_state->uapi.src_x = src_x;
15499 new_plane_state->uapi.src_y = src_y;
15500 new_plane_state->uapi.src_w = src_w;
15501 new_plane_state->uapi.src_h = src_h;
15502 new_plane_state->uapi.crtc_x = crtc_x;
15503 new_plane_state->uapi.crtc_y = crtc_y;
15504 new_plane_state->uapi.crtc_w = crtc_w;
15505 new_plane_state->uapi.crtc_h = crtc_h;
15507 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
15508 old_plane_state, new_plane_state);
15512 ret = intel_plane_pin_fb(new_plane_state);
15516 intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
15518 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15519 to_intel_frontbuffer(new_plane_state->hw.fb),
15520 plane->frontbuffer_bit);
15522 /* Swap plane state */
15523 plane->base.state = &new_plane_state->uapi;
15526 * We cannot swap crtc_state as it may be in use by an atomic commit or
15527 * page flip that's running simultaneously. If we swap crtc_state and
15528 * destroy the old state, we will cause a use-after-free there.
15530 * Only update active_planes, which is needed for our internal
15531 * bookkeeping. Either value will do the right thing when updating
15532 * planes atomically. If the cursor was part of the atomic update then
15533 * we would have taken the slowpath.
15535 crtc_state->active_planes = new_crtc_state->active_planes;
15537 if (new_plane_state->uapi.visible)
15538 intel_update_plane(plane, crtc_state, new_plane_state);
15540 intel_disable_plane(plane, crtc_state);
15542 intel_plane_unpin_fb(old_plane_state);
15545 if (new_crtc_state)
15546 intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
15548 intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
15550 intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
15554 return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
15555 crtc_x, crtc_y, crtc_w, crtc_h,
15556 src_x, src_y, src_w, src_h, ctx);
15559 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
15560 .update_plane = intel_legacy_cursor_update,
15561 .disable_plane = drm_atomic_helper_disable_plane,
15562 .destroy = intel_plane_destroy,
15563 .atomic_duplicate_state = intel_plane_duplicate_state,
15564 .atomic_destroy_state = intel_plane_destroy_state,
15565 .format_mod_supported = intel_cursor_format_mod_supported,
15568 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
15569 enum i9xx_plane_id i9xx_plane)
15571 if (!HAS_FBC(dev_priv))
15574 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15575 return i9xx_plane == PLANE_A; /* tied to pipe A */
15576 else if (IS_IVYBRIDGE(dev_priv))
15577 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
15578 i9xx_plane == PLANE_C;
15579 else if (INTEL_GEN(dev_priv) >= 4)
15580 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
15582 return i9xx_plane == PLANE_A;
15585 static struct intel_plane *
15586 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
15588 struct intel_plane *plane;
15589 const struct drm_plane_funcs *plane_funcs;
15590 unsigned int supported_rotations;
15591 unsigned int possible_crtcs;
15592 const u32 *formats;
15596 if (INTEL_GEN(dev_priv) >= 9)
15597 return skl_universal_plane_create(dev_priv, pipe,
15600 plane = intel_plane_alloc();
15604 plane->pipe = pipe;
15606 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
15607 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
15609 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
15610 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
15612 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
15613 plane->id = PLANE_PRIMARY;
15614 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
15616 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
15617 if (plane->has_fbc) {
15618 struct intel_fbc *fbc = &dev_priv->fbc;
15620 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
15623 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15624 formats = vlv_primary_formats;
15625 num_formats = ARRAY_SIZE(vlv_primary_formats);
15626 } else if (INTEL_GEN(dev_priv) >= 4) {
15628 * WaFP16GammaEnabling:ivb
15629 * "Workaround : When using the 64-bit format, the plane
15630 * output on each color channel has one quarter amplitude.
15631 * It can be brought up to full amplitude by using pipe
15632 * gamma correction or pipe color space conversion to
15633 * multiply the plane output by four."
15635 * There is no dedicated plane gamma for the primary plane,
15636 * and using the pipe gamma/csc could conflict with other
15637 * planes, so we choose not to expose fp16 on IVB primary
15638 * planes. HSW primary planes no longer have this problem.
15640 if (IS_IVYBRIDGE(dev_priv)) {
15641 formats = ivb_primary_formats;
15642 num_formats = ARRAY_SIZE(ivb_primary_formats);
15644 formats = i965_primary_formats;
15645 num_formats = ARRAY_SIZE(i965_primary_formats);
15648 formats = i8xx_primary_formats;
15649 num_formats = ARRAY_SIZE(i8xx_primary_formats);
15652 if (INTEL_GEN(dev_priv) >= 4)
15653 plane_funcs = &i965_plane_funcs;
15655 plane_funcs = &i8xx_plane_funcs;
15657 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15658 plane->min_cdclk = vlv_plane_min_cdclk;
15659 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15660 plane->min_cdclk = hsw_plane_min_cdclk;
15661 else if (IS_IVYBRIDGE(dev_priv))
15662 plane->min_cdclk = ivb_plane_min_cdclk;
15664 plane->min_cdclk = i9xx_plane_min_cdclk;
15666 plane->max_stride = i9xx_plane_max_stride;
15667 plane->update_plane = i9xx_update_plane;
15668 plane->disable_plane = i9xx_disable_plane;
15669 plane->get_hw_state = i9xx_plane_get_hw_state;
15670 plane->check_plane = i9xx_plane_check;
15672 possible_crtcs = BIT(pipe);
15674 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
15675 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
15676 possible_crtcs, plane_funcs,
15677 formats, num_formats,
15678 i9xx_format_modifiers,
15679 DRM_PLANE_TYPE_PRIMARY,
15680 "primary %c", pipe_name(pipe));
15682 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
15683 possible_crtcs, plane_funcs,
15684 formats, num_formats,
15685 i9xx_format_modifiers,
15686 DRM_PLANE_TYPE_PRIMARY,
15688 plane_name(plane->i9xx_plane));
15692 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
15693 supported_rotations =
15694 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
15695 DRM_MODE_REFLECT_X;
15696 } else if (INTEL_GEN(dev_priv) >= 4) {
15697 supported_rotations =
15698 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
15700 supported_rotations = DRM_MODE_ROTATE_0;
15703 if (INTEL_GEN(dev_priv) >= 4)
15704 drm_plane_create_rotation_property(&plane->base,
15706 supported_rotations);
15709 drm_plane_create_zpos_immutable_property(&plane->base, zpos);
15711 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
15716 intel_plane_free(plane);
15718 return ERR_PTR(ret);
15721 static struct intel_plane *
15722 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
15725 unsigned int possible_crtcs;
15726 struct intel_plane *cursor;
15729 cursor = intel_plane_alloc();
15730 if (IS_ERR(cursor))
15733 cursor->pipe = pipe;
15734 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
15735 cursor->id = PLANE_CURSOR;
15736 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
15738 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15739 cursor->max_stride = i845_cursor_max_stride;
15740 cursor->update_plane = i845_update_cursor;
15741 cursor->disable_plane = i845_disable_cursor;
15742 cursor->get_hw_state = i845_cursor_get_hw_state;
15743 cursor->check_plane = i845_check_cursor;
15745 cursor->max_stride = i9xx_cursor_max_stride;
15746 cursor->update_plane = i9xx_update_cursor;
15747 cursor->disable_plane = i9xx_disable_cursor;
15748 cursor->get_hw_state = i9xx_cursor_get_hw_state;
15749 cursor->check_plane = i9xx_check_cursor;
15752 cursor->cursor.base = ~0;
15753 cursor->cursor.cntl = ~0;
15755 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
15756 cursor->cursor.size = ~0;
15758 possible_crtcs = BIT(pipe);
15760 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
15761 possible_crtcs, &intel_cursor_plane_funcs,
15762 intel_cursor_formats,
15763 ARRAY_SIZE(intel_cursor_formats),
15764 cursor_format_modifiers,
15765 DRM_PLANE_TYPE_CURSOR,
15766 "cursor %c", pipe_name(pipe));
15770 if (INTEL_GEN(dev_priv) >= 4)
15771 drm_plane_create_rotation_property(&cursor->base,
15773 DRM_MODE_ROTATE_0 |
15774 DRM_MODE_ROTATE_180);
15776 zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
15777 drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
15779 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
15784 intel_plane_free(cursor);
15786 return ERR_PTR(ret);
15789 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
15790 struct intel_crtc_state *crtc_state)
15792 struct intel_crtc_scaler_state *scaler_state =
15793 &crtc_state->scaler_state;
15794 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15797 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
15798 if (!crtc->num_scalers)
15801 for (i = 0; i < crtc->num_scalers; i++) {
15802 struct intel_scaler *scaler = &scaler_state->scalers[i];
15804 scaler->in_use = 0;
15808 scaler_state->scaler_id = -1;
/* Common drm_crtc_funcs entries shared by all per-platform funcs tables. */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
15822 static const struct drm_crtc_funcs bdw_crtc_funcs = {
15825 .get_vblank_counter = g4x_get_vblank_counter,
15826 .enable_vblank = bdw_enable_vblank,
15827 .disable_vblank = bdw_disable_vblank,
15830 static const struct drm_crtc_funcs ilk_crtc_funcs = {
15833 .get_vblank_counter = g4x_get_vblank_counter,
15834 .enable_vblank = ilk_enable_vblank,
15835 .disable_vblank = ilk_disable_vblank,
15838 static const struct drm_crtc_funcs g4x_crtc_funcs = {
15841 .get_vblank_counter = g4x_get_vblank_counter,
15842 .enable_vblank = i965_enable_vblank,
15843 .disable_vblank = i965_disable_vblank,
15846 static const struct drm_crtc_funcs i965_crtc_funcs = {
15849 .get_vblank_counter = i915_get_vblank_counter,
15850 .enable_vblank = i965_enable_vblank,
15851 .disable_vblank = i965_disable_vblank,
15854 static const struct drm_crtc_funcs i915gm_crtc_funcs = {
15857 .get_vblank_counter = i915_get_vblank_counter,
15858 .enable_vblank = i915gm_enable_vblank,
15859 .disable_vblank = i915gm_disable_vblank,
15862 static const struct drm_crtc_funcs i915_crtc_funcs = {
15865 .get_vblank_counter = i915_get_vblank_counter,
15866 .enable_vblank = i8xx_enable_vblank,
15867 .disable_vblank = i8xx_disable_vblank,
15870 static const struct drm_crtc_funcs i8xx_crtc_funcs = {
15873 /* no hw vblank counter */
15874 .enable_vblank = i8xx_enable_vblank,
15875 .disable_vblank = i8xx_disable_vblank,
/*
 * intel_crtc_init - allocate and register one CRTC (pipe) with its planes.
 *
 * Creates the intel_crtc and its initial software state, creates the
 * primary, sprite and cursor planes for @pipe, selects the per-platform
 * drm_crtc_funcs vtable, and registers the CRTC with the DRM core.
 * Returns 0 on success or a negative errno (error paths are partially
 * elided in this extract; the kzalloc NULL checks and cleanup gotos are
 * not visible here).
 */
15878 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
15880 const struct drm_crtc_funcs *funcs;
15881 struct intel_crtc *intel_crtc;
15882 struct intel_crtc_state *crtc_state = NULL;
15883 struct intel_plane *primary = NULL;
15884 struct intel_plane *cursor = NULL;
15887 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
15891 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
/* Attach the freshly zeroed state as both the uapi and software state. */
15896 __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->uapi);
15897 intel_crtc->config = crtc_state;
15899 primary = intel_primary_plane_create(dev_priv, pipe);
15900 if (IS_ERR(primary)) {
15901 ret = PTR_ERR(primary);
15904 intel_crtc->plane_ids_mask |= BIT(primary->id);
15906 for_each_sprite(dev_priv, pipe, sprite) {
15907 struct intel_plane *plane;
15909 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
15910 if (IS_ERR(plane)) {
15911 ret = PTR_ERR(plane);
15914 intel_crtc->plane_ids_mask |= BIT(plane->id);
15917 cursor = intel_cursor_plane_create(dev_priv, pipe);
15918 if (IS_ERR(cursor)) {
15919 ret = PTR_ERR(cursor);
15922 intel_crtc->plane_ids_mask |= BIT(cursor->id);
/* Pick the vblank hook table matching the platform generation. */
15924 if (HAS_GMCH(dev_priv)) {
15925 if (IS_CHERRYVIEW(dev_priv) ||
15926 IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
15927 funcs = &g4x_crtc_funcs;
15928 else if (IS_GEN(dev_priv, 4))
15929 funcs = &i965_crtc_funcs;
15930 else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
15931 funcs = &i915gm_crtc_funcs;
15932 else if (IS_GEN(dev_priv, 3))
15933 funcs = &i915_crtc_funcs;
15935 funcs = &i8xx_crtc_funcs;
15937 if (INTEL_GEN(dev_priv) >= 8)
15938 funcs = &bdw_crtc_funcs;
15940 funcs = &ilk_crtc_funcs;
15943 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
15944 &primary->base, &cursor->base,
15945 funcs, "pipe %c", pipe_name(pipe));
15949 intel_crtc->pipe = pipe;
15951 /* initialize shared scalers */
15952 intel_crtc_init_scalers(intel_crtc, crtc_state);
/* Each pipe must map to exactly one CRTC; a double init is a driver bug. */
15954 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
15955 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
15956 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
/* Pre-gen9 also tracks the legacy primary plane -> CRTC mapping. */
15958 if (INTEL_GEN(dev_priv) < 9) {
15959 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
15961 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
15962 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
15963 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
15966 intel_color_init(intel_crtc);
/* Other code assumes drm CRTC index == pipe; assert it holds. */
15968 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
15974 * drm_mode_config_cleanup() will free up any
15975 * crtcs/planes already initialized.
/*
 * intel_get_pipe_from_crtc_id_ioctl - legacy ioctl mapping a CRTC id to
 * its hardware pipe number. Looks up the drm_crtc by the user-supplied
 * id and writes the pipe index back into the ioctl argument. (The
 * not-found error return is elided in this extract.)
 */
15983 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15984 struct drm_file *file)
15986 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15987 struct drm_crtc *drmmode_crtc;
15988 struct intel_crtc *crtc;
15990 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15994 crtc = to_intel_crtc(drmmode_crtc);
15995 pipe_from_crtc_id->pipe = crtc->pipe;
/*
 * intel_encoder_possible_clones - build the bitmask of encoders that can
 * be driven simultaneously with @encoder (cloning). Iterates all encoders
 * and ORs in the mask of each one encoders_cloneable() approves.
 */
16000 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16002 struct drm_device *dev = encoder->base.dev;
16003 struct intel_encoder *source_encoder;
16004 u32 possible_clones = 0;
16006 for_each_intel_encoder(dev, source_encoder) {
16007 if (encoders_cloneable(encoder, source_encoder))
16008 possible_clones |= drm_encoder_mask(&source_encoder->base);
16011 return possible_clones;
/*
 * intel_encoder_possible_crtcs - translate the encoder's pipe_mask
 * (hardware pipes it can output to) into a drm CRTC bitmask for the
 * encoder's possible_crtcs field.
 */
16014 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16016 struct drm_device *dev = encoder->base.dev;
16017 struct intel_crtc *crtc;
16018 u32 possible_crtcs = 0;
16020 for_each_intel_crtc(dev, crtc) {
16021 if (encoder->pipe_mask & BIT(crtc->pipe))
16022 possible_crtcs |= drm_crtc_mask(&crtc->base);
16025 return possible_crtcs;
/*
 * ilk_has_edp_a - report whether eDP port A exists on this ILK-class
 * platform: mobile only, DP_A must read as detected, and on gen5 the
 * fuse strap must not disable it. (The bare return statements between
 * the checks are elided in this extract.)
 */
16028 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16030 if (!IS_MOBILE(dev_priv))
16033 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
16036 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
/*
 * intel_ddi_crt_present - decide whether a CRT (VGA) connector exists on
 * a DDI platform (HSW/BDW). Rules out gen9+, ULT parts, fuse-disabled
 * LPT-H, DDI E stolen by a 4-lane DDI A, and VBT opt-out. (The boolean
 * return statements between checks are elided in this extract.)
 */
16042 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
16044 if (INTEL_GEN(dev_priv) >= 9)
16047 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
16050 if (HAS_PCH_LPT_H(dev_priv) &&
16051 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
16054 /* DDI E can't be used if DDI A requires 4 lanes */
16055 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
16058 if (!dev_priv->vbt.int_crt_support)
/*
 * intel_pps_unlock_regs_wa - workaround that writes the panel-power-
 * sequencer unlock key into every PP_CONTROL instance so later PPS
 * register writes are not silently dropped by the write-protect logic.
 * Skipped on DDI platforms; pps_num selection (1 vs 2 on VLV/CHV) is
 * partially elided in this extract.
 */
16064 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16069 if (HAS_DDI(dev_priv))
16072 * This w/a is needed at least on CPT/PPT, but to be sure apply it
16073 * everywhere where registers can be write protected.
16075 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16080 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16081 u32 val = I915_READ(PP_CONTROL(pps_idx));
/* Replace whatever unlock bits are there with the magic unlock value. */
16083 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16084 I915_WRITE(PP_CONTROL(pps_idx), val);
/*
 * intel_pps_init - select the MMIO base for the panel power sequencer
 * registers (PCH, VLV/CHV or legacy location) and apply the register
 * unlock workaround.
 */
16088 static void intel_pps_init(struct drm_i915_private *dev_priv)
16090 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
16091 dev_priv->pps_mmio_base = PCH_PPS_BASE;
16092 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16093 dev_priv->pps_mmio_base = VLV_PPS_BASE;
16095 dev_priv->pps_mmio_base = PPS_BASE;
16097 intel_pps_unlock_regs_wa(dev_priv);
/*
 * intel_setup_outputs - probe and register every display output
 * (encoder/connector) present on this platform.
 *
 * One large per-platform cascade: gen12+, EHL, gen11, GEN9-LP, DDI
 * (HSW/BDW/SKL), PCH split (ILK..IVB), VLV/CHV, and the legacy gen2-4
 * paths each instantiate the DDI/DP/HDMI/SDVO/LVDS/CRT/DSI/TV encoders
 * their hardware can have, using strap registers and/or VBT to decide
 * presence. Afterwards possible_crtcs/possible_clones are computed for
 * every registered encoder. Ordering within each branch matters (e.g.
 * LVDS before CRT on PCH, eDP before HDMI on VLV/CHV); do not reorder.
 */
16100 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
16102 struct intel_encoder *encoder;
16103 bool dpd_is_edp = false;
16105 intel_pps_init(dev_priv);
16107 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
16110 if (INTEL_GEN(dev_priv) >= 12) {
16111 intel_ddi_init(dev_priv, PORT_A);
16112 intel_ddi_init(dev_priv, PORT_B);
16113 intel_ddi_init(dev_priv, PORT_D);
16114 intel_ddi_init(dev_priv, PORT_E);
16115 intel_ddi_init(dev_priv, PORT_F);
16116 intel_ddi_init(dev_priv, PORT_G);
16117 intel_ddi_init(dev_priv, PORT_H);
16118 intel_ddi_init(dev_priv, PORT_I);
16119 icl_dsi_init(dev_priv);
16120 } else if (IS_ELKHARTLAKE(dev_priv)) {
16121 intel_ddi_init(dev_priv, PORT_A);
16122 intel_ddi_init(dev_priv, PORT_B);
16123 intel_ddi_init(dev_priv, PORT_C);
16124 intel_ddi_init(dev_priv, PORT_D);
16125 icl_dsi_init(dev_priv);
16126 } else if (IS_GEN(dev_priv, 11)) {
16127 intel_ddi_init(dev_priv, PORT_A);
16128 intel_ddi_init(dev_priv, PORT_B);
16129 intel_ddi_init(dev_priv, PORT_C);
16130 intel_ddi_init(dev_priv, PORT_D);
16131 intel_ddi_init(dev_priv, PORT_E);
16133 * On some ICL SKUs port F is not present. No strap bits for
16134 * this, so rely on VBT.
16135 * Work around broken VBTs on SKUs known to have no port F.
16137 if (IS_ICL_WITH_PORT_F(dev_priv) &&
16138 intel_bios_is_port_present(dev_priv, PORT_F))
16139 intel_ddi_init(dev_priv, PORT_F);
16141 icl_dsi_init(dev_priv);
16142 } else if (IS_GEN9_LP(dev_priv)) {
16144 * FIXME: Broxton doesn't support port detection via the
16145 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
16146 * detect the ports.
16148 intel_ddi_init(dev_priv, PORT_A);
16149 intel_ddi_init(dev_priv, PORT_B);
16150 intel_ddi_init(dev_priv, PORT_C);
16152 vlv_dsi_init(dev_priv);
16153 } else if (HAS_DDI(dev_priv)) {
16156 if (intel_ddi_crt_present(dev_priv))
16157 intel_crt_init(dev_priv);
16160 * Haswell uses DDI functions to detect digital outputs.
16161 * On SKL pre-D0 the strap isn't connected, so we assume
16164 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
16165 /* WaIgnoreDDIAStrap: skl */
16166 if (found || IS_GEN9_BC(dev_priv))
16167 intel_ddi_init(dev_priv, PORT_A);
16169 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
16171 found = I915_READ(SFUSE_STRAP);
16173 if (found & SFUSE_STRAP_DDIB_DETECTED)
16174 intel_ddi_init(dev_priv, PORT_B);
16175 if (found & SFUSE_STRAP_DDIC_DETECTED)
16176 intel_ddi_init(dev_priv, PORT_C);
16177 if (found & SFUSE_STRAP_DDID_DETECTED)
16178 intel_ddi_init(dev_priv, PORT_D);
16179 if (found & SFUSE_STRAP_DDIF_DETECTED)
16180 intel_ddi_init(dev_priv, PORT_F);
16182 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
16184 if (IS_GEN9_BC(dev_priv) &&
16185 intel_bios_is_port_present(dev_priv, PORT_E))
16186 intel_ddi_init(dev_priv, PORT_E);
16188 } else if (HAS_PCH_SPLIT(dev_priv)) {
16192 * intel_edp_init_connector() depends on this completing first,
16193 * to prevent the registration of both eDP and LVDS and the
16194 * incorrect sharing of the PPS.
16196 intel_lvds_init(dev_priv);
16197 intel_crt_init(dev_priv);
16199 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
16201 if (ilk_has_edp_a(dev_priv))
16202 intel_dp_init(dev_priv, DP_A, PORT_A);
16204 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
16205 /* PCH SDVOB multiplex with HDMIB */
16206 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
16208 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
16209 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
16210 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
16213 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
16214 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
16216 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
16217 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
16219 if (I915_READ(PCH_DP_C) & DP_DETECTED)
16220 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
16222 if (I915_READ(PCH_DP_D) & DP_DETECTED)
16223 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
16224 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16225 bool has_edp, has_port;
16227 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
16228 intel_crt_init(dev_priv);
16231 * The DP_DETECTED bit is the latched state of the DDC
16232 * SDA pin at boot. However since eDP doesn't require DDC
16233 * (no way to plug in a DP->HDMI dongle) the DDC pins for
16234 * eDP ports may have been muxed to an alternate function.
16235 * Thus we can't rely on the DP_DETECTED bit alone to detect
16236 * eDP ports. Consult the VBT as well as DP_DETECTED to
16237 * detect eDP ports.
16239 * Sadly the straps seem to be missing sometimes even for HDMI
16240 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
16241 * and VBT for the presence of the port. Additionally we can't
16242 * trust the port type the VBT declares as we've seen at least
16243 * HDMI ports that the VBT claim are DP or eDP.
16245 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
16246 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
16247 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
16248 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
16249 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
16250 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
16252 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
16253 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
16254 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
16255 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
16256 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
16257 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
16259 if (IS_CHERRYVIEW(dev_priv)) {
16261 * eDP not supported on port D,
16262 * so no need to worry about it
16264 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
16265 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
16266 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
16267 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
16268 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
16271 vlv_dsi_init(dev_priv);
16272 } else if (IS_PINEVIEW(dev_priv)) {
16273 intel_lvds_init(dev_priv);
16274 intel_crt_init(dev_priv);
16275 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
16276 bool found = false;
16278 if (IS_MOBILE(dev_priv))
16279 intel_lvds_init(dev_priv);
16281 intel_crt_init(dev_priv);
16283 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
16284 DRM_DEBUG_KMS("probing SDVOB\n");
16285 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
16286 if (!found && IS_G4X(dev_priv)) {
16287 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
16288 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
16291 if (!found && IS_G4X(dev_priv))
16292 intel_dp_init(dev_priv, DP_B, PORT_B);
16295 /* Before G4X SDVOC doesn't have its own detect register */
16297 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
16298 DRM_DEBUG_KMS("probing SDVOC\n");
16299 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
16302 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
16304 if (IS_G4X(dev_priv)) {
16305 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
16306 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
16308 if (IS_G4X(dev_priv))
16309 intel_dp_init(dev_priv, DP_C, PORT_C);
16312 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
16313 intel_dp_init(dev_priv, DP_D, PORT_D);
16315 if (SUPPORTS_TV(dev_priv))
16316 intel_tv_init(dev_priv);
16317 } else if (IS_GEN(dev_priv, 2)) {
16318 if (IS_I85X(dev_priv))
16319 intel_lvds_init(dev_priv);
16321 intel_crt_init(dev_priv);
16322 intel_dvo_init(dev_priv);
16325 intel_psr_init(dev_priv);
/* Now that all encoders exist, compute their CRTC and clone masks. */
16327 for_each_intel_encoder(&dev_priv->drm, encoder) {
16328 encoder->base.possible_crtcs =
16329 intel_encoder_possible_crtcs(encoder);
16330 encoder->base.possible_clones =
16331 intel_encoder_possible_clones(encoder);
16334 intel_init_pch_refclk(dev_priv);
16336 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
/*
 * intel_user_framebuffer_destroy - drm_framebuffer_funcs.destroy hook:
 * unregister the fb from the DRM core and drop the frontbuffer tracking
 * reference taken at creation. (The final kfree is elided in this
 * extract.)
 */
16339 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16341 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16343 drm_framebuffer_cleanup(fb);
16344 intel_frontbuffer_put(intel_fb->frontbuffer);
/*
 * intel_user_framebuffer_create_handle - .create_handle hook: export a
 * GEM handle for the fb's backing object. Userptr-backed objects are
 * refused, since handing their handle to another process would leak
 * access to foreign user memory (the error return is elided here).
 */
16349 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16350 struct drm_file *file,
16351 unsigned int *handle)
16353 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16355 if (obj->userptr.mm) {
16356 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
16360 return drm_gem_handle_create(file, &obj->base, handle);
/*
 * intel_user_framebuffer_dirty - .dirty hook: userspace signalled that
 * fb contents changed. Flush CPU caches for the object if it's on a
 * display plane and notify frontbuffer tracking (FBC/PSR etc.). Clip
 * rectangles are ignored; the whole fb is flushed.
 */
16363 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
16364 struct drm_file *file,
16365 unsigned flags, unsigned color,
16366 struct drm_clip_rect *clips,
16367 unsigned num_clips)
16369 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16371 i915_gem_object_flush_if_display(obj);
16372 intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
/* Framebuffer vtable wiring up the three hooks defined above. */
16377 static const struct drm_framebuffer_funcs intel_fb_funcs = {
16378 .destroy = intel_user_framebuffer_destroy,
16379 .create_handle = intel_user_framebuffer_create_handle,
16380 .dirty = intel_user_framebuffer_dirty,
/*
 * intel_framebuffer_init - validate a user-supplied fb description
 * against hardware constraints and initialize @intel_fb around @obj.
 *
 * Checks modifier-vs-tiling consistency, pixel format support, stride
 * limits and alignment (including the gen9 CCS Display WA #0531), and
 * registers the fb with the DRM core. Returns 0 or negative errno; the
 * error-unwind labels and several early returns are elided in this
 * extract.
 */
16383 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
16384 struct drm_i915_gem_object *obj,
16385 struct drm_mode_fb_cmd2 *mode_cmd)
16387 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
16388 struct drm_framebuffer *fb = &intel_fb->base;
16390 unsigned int tiling, stride;
16394 intel_fb->frontbuffer = intel_frontbuffer_get(obj);
16395 if (!intel_fb->frontbuffer)
/* Snapshot the object's tiling under its lock; it must stay consistent. */
16398 i915_gem_object_lock(obj);
16399 tiling = i915_gem_object_get_tiling(obj);
16400 stride = i915_gem_object_get_stride(obj);
16401 i915_gem_object_unlock(obj);
16403 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
16405 * If there's a fence, enforce that
16406 * the fb modifier and tiling mode match.
16408 if (tiling != I915_TILING_NONE &&
16409 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
16410 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
/* No modifiers supplied: derive the modifier from the object's tiling. */
16414 if (tiling == I915_TILING_X) {
16415 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
16416 } else if (tiling == I915_TILING_Y) {
16417 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
16422 if (!drm_any_plane_has_format(&dev_priv->drm,
16423 mode_cmd->pixel_format,
16424 mode_cmd->modifier[0])) {
16425 struct drm_format_name_buf format_name;
16427 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
16428 drm_get_format_name(mode_cmd->pixel_format,
16430 mode_cmd->modifier[0]);
16435 * gen2/3 display engine uses the fence if present,
16436 * so the tiling mode must match the fb modifier exactly.
16438 if (INTEL_GEN(dev_priv) < 4 &&
16439 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
16440 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
16444 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
16445 mode_cmd->modifier[0]);
16446 if (mode_cmd->pitches[0] > max_stride) {
16447 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
16448 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
16449 "tiled" : "linear",
16450 mode_cmd->pitches[0], max_stride);
16455 * If there's a fence, enforce that
16456 * the fb pitch and fence stride match.
16458 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
16459 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
16460 mode_cmd->pitches[0], stride);
16464 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
16465 if (mode_cmd->offsets[0] != 0)
16468 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
16470 for (i = 0; i < fb->format->num_planes; i++) {
16471 u32 stride_alignment;
/* All color planes must come from the same GEM object. */
16473 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
16474 DRM_DEBUG_KMS("bad plane %d handle\n", i);
16478 stride_alignment = intel_fb_stride_alignment(fb, i);
16481 * Display WA #0531: skl,bxt,kbl,glk
16483 * Render decompression and plane width > 3840
16484 * combined with horizontal panning requires the
16485 * plane stride to be a multiple of 4. We'll just
16486 * require the entire fb to accommodate that to avoid
16487 * potential runtime errors at plane configuration time.
16489 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
16490 is_ccs_modifier(fb->modifier))
16491 stride_alignment *= 4;
16493 if (fb->pitches[i] & (stride_alignment - 1)) {
16494 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
16495 i, fb->pitches[i], stride_alignment);
16499 fb->obj[i] = &obj->base;
16502 ret = intel_fill_fb_info(dev_priv, fb);
16506 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
16508 DRM_ERROR("framebuffer init failed %d\n", ret);
/* Error path: drop the frontbuffer reference taken at the top. */
16515 intel_frontbuffer_put(intel_fb->frontbuffer);
/*
 * intel_user_framebuffer_create - drm_mode_config_funcs.fb_create hook:
 * resolve the user's GEM handle, build an intel framebuffer around the
 * object, and drop our lookup reference (the fb holds its own). The
 * mode_cmd is copied because intel_framebuffer_init() may rewrite the
 * modifier field.
 */
16519 static struct drm_framebuffer *
16520 intel_user_framebuffer_create(struct drm_device *dev,
16521 struct drm_file *filp,
16522 const struct drm_mode_fb_cmd2 *user_mode_cmd)
16524 struct drm_framebuffer *fb;
16525 struct drm_i915_gem_object *obj;
16526 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
16528 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
16530 return ERR_PTR(-ENOENT);
16532 fb = intel_framebuffer_create(obj, &mode_cmd);
16533 i915_gem_object_put(obj);
/*
 * intel_atomic_state_free - .atomic_state_free hook: release the DRM
 * core's portion of the state, then tear down the i915-private commit
 * fence. (The trailing kfree of the state is elided in this extract.)
 */
16538 static void intel_atomic_state_free(struct drm_atomic_state *state)
16540 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
16542 drm_atomic_state_default_release(state);
16544 i915_sw_fence_fini(&intel_state->commit_ready);
/*
 * intel_mode_valid - device-wide mode filter (.mode_valid hook).
 *
 * Rejects mode flags the hardware never supports, then applies the
 * transcoder timing limits for the platform generation, and finally
 * the minimum active/blanking requirements. DBLSCAN is deliberately
 * not rejected here (see comment below); that happens per-connector.
 */
16549 static enum drm_mode_status
16550 intel_mode_valid(struct drm_device *dev,
16551 const struct drm_display_mode *mode)
16553 struct drm_i915_private *dev_priv = to_i915(dev);
16554 int hdisplay_max, htotal_max;
16555 int vdisplay_max, vtotal_max;
16558 * Can't reject DBLSCAN here because Xorg ddxen can add piles
16559 * of DBLSCAN modes to the output's mode list when they detect
16560 * the scaling mode property on the connector. And they don't
16561 * ask the kernel to validate those modes in any way until
16562 * modeset time at which point the client gets a protocol error.
16563 * So in order to not upset those clients we silently ignore the
16564 * DBLSCAN flag on such connectors. For other connectors we will
16565 * reject modes with the DBLSCAN flag in encoder->compute_config().
16566 * And we always reject DBLSCAN modes in connector->mode_valid()
16567 * as we never want such modes on the connector's mode list.
16570 if (mode->vscan > 1)
16571 return MODE_NO_VSCAN;
16573 if (mode->flags & DRM_MODE_FLAG_HSKEW)
16574 return MODE_H_ILLEGAL;
16576 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
16577 DRM_MODE_FLAG_NCSYNC |
16578 DRM_MODE_FLAG_PCSYNC))
16581 if (mode->flags & (DRM_MODE_FLAG_BCAST |
16582 DRM_MODE_FLAG_PIXMUX |
16583 DRM_MODE_FLAG_CLKDIV2))
16586 /* Transcoder timing limits */
16587 if (INTEL_GEN(dev_priv) >= 11) {
16588 hdisplay_max = 16384;
16589 vdisplay_max = 8192;
16590 htotal_max = 16384;
16592 } else if (INTEL_GEN(dev_priv) >= 9 ||
16593 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
16594 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
16595 vdisplay_max = 4096;
16598 } else if (INTEL_GEN(dev_priv) >= 3) {
16599 hdisplay_max = 4096;
16600 vdisplay_max = 4096;
16604 hdisplay_max = 2048;
16605 vdisplay_max = 2048;
16610 if (mode->hdisplay > hdisplay_max ||
16611 mode->hsync_start > htotal_max ||
16612 mode->hsync_end > htotal_max ||
16613 mode->htotal > htotal_max)
16614 return MODE_H_ILLEGAL;
16616 if (mode->vdisplay > vdisplay_max ||
16617 mode->vsync_start > vtotal_max ||
16618 mode->vsync_end > vtotal_max ||
16619 mode->vtotal > vtotal_max)
16620 return MODE_V_ILLEGAL;
/* Minimum active width and blanking periods differ pre/post gen5. */
16622 if (INTEL_GEN(dev_priv) >= 5) {
16623 if (mode->hdisplay < 64 ||
16624 mode->htotal - mode->hdisplay < 32)
16625 return MODE_H_ILLEGAL;
16627 if (mode->vtotal - mode->vdisplay < 5)
16628 return MODE_V_ILLEGAL;
16630 if (mode->htotal - mode->hdisplay < 32)
16631 return MODE_H_ILLEGAL;
16633 if (mode->vtotal - mode->vdisplay < 3)
16634 return MODE_V_ILLEGAL;
/*
 * intel_mode_valid_max_plane_size - additional gen9+ mode filter that
 * rejects modes larger than the maximum universal-plane size, so we
 * don't advertise modes where a fullscreen plane would be impossible.
 * No-op (returns OK) before gen9; the early return is elided here.
 */
16640 enum drm_mode_status
16641 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16642 const struct drm_display_mode *mode)
16644 int plane_width_max, plane_height_max;
16647 * intel_mode_valid() should be
16648 * sufficient on older platforms.
16650 if (INTEL_GEN(dev_priv) < 9)
16654 * Most people will probably want a fullscreen
16655 * plane so let's not advertize modes that are
16656 * too big for that.
16658 if (INTEL_GEN(dev_priv) >= 11) {
16659 plane_width_max = 5120;
16660 plane_height_max = 4320;
16662 plane_width_max = 5120;
16663 plane_height_max = 4096;
16666 if (mode->hdisplay > plane_width_max)
16667 return MODE_H_ILLEGAL;
16669 if (mode->vdisplay > plane_height_max)
16670 return MODE_V_ILLEGAL;
/* Device-level mode config vtable: fb creation, mode validation and the
 * atomic check/commit/state-lifecycle entry points. */
16675 static const struct drm_mode_config_funcs intel_mode_funcs = {
16676 .fb_create = intel_user_framebuffer_create,
16677 .get_format_info = intel_get_format_info,
16678 .output_poll_changed = intel_fbdev_output_poll_changed,
16679 .mode_valid = intel_mode_valid,
16680 .atomic_check = intel_atomic_check,
16681 .atomic_commit = intel_atomic_commit,
16682 .atomic_state_alloc = intel_atomic_state_alloc,
16683 .atomic_state_clear = intel_atomic_state_clear,
16684 .atomic_state_free = intel_atomic_state_free,
16688 * intel_init_display_hooks - initialize the display modesetting hooks
16689 * @dev_priv: device private
/*
 * Fills in dev_priv->display with the per-platform implementations of
 * pipe readout, initial plane readout, clock computation and CRTC
 * enable/disable, then the FDI link-training hook (ILK..BDW only) and
 * the modeset-enable commit helper. Branch order encodes platform
 * priority (newest first); do not reorder.
 */
16691 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
16693 intel_init_cdclk_hooks(dev_priv);
16695 if (INTEL_GEN(dev_priv) >= 9) {
16696 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16697 dev_priv->display.get_initial_plane_config =
16698 skylake_get_initial_plane_config;
16699 dev_priv->display.crtc_compute_clock =
16700 haswell_crtc_compute_clock;
16701 dev_priv->display.crtc_enable = haswell_crtc_enable;
16702 dev_priv->display.crtc_disable = haswell_crtc_disable;
16703 } else if (HAS_DDI(dev_priv)) {
16704 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16705 dev_priv->display.get_initial_plane_config =
16706 i9xx_get_initial_plane_config;
16707 dev_priv->display.crtc_compute_clock =
16708 haswell_crtc_compute_clock;
16709 dev_priv->display.crtc_enable = haswell_crtc_enable;
16710 dev_priv->display.crtc_disable = haswell_crtc_disable;
16711 } else if (HAS_PCH_SPLIT(dev_priv)) {
16712 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
16713 dev_priv->display.get_initial_plane_config =
16714 i9xx_get_initial_plane_config;
16715 dev_priv->display.crtc_compute_clock =
16716 ironlake_crtc_compute_clock;
16717 dev_priv->display.crtc_enable = ironlake_crtc_enable;
16718 dev_priv->display.crtc_disable = ironlake_crtc_disable;
16719 } else if (IS_CHERRYVIEW(dev_priv)) {
16720 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16721 dev_priv->display.get_initial_plane_config =
16722 i9xx_get_initial_plane_config;
16723 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
16724 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16725 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16726 } else if (IS_VALLEYVIEW(dev_priv)) {
16727 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16728 dev_priv->display.get_initial_plane_config =
16729 i9xx_get_initial_plane_config;
16730 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
16731 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16732 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16733 } else if (IS_G4X(dev_priv)) {
16734 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16735 dev_priv->display.get_initial_plane_config =
16736 i9xx_get_initial_plane_config;
16737 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
16738 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16739 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16740 } else if (IS_PINEVIEW(dev_priv)) {
16741 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16742 dev_priv->display.get_initial_plane_config =
16743 i9xx_get_initial_plane_config;
16744 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
16745 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16746 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16747 } else if (!IS_GEN(dev_priv, 2)) {
16748 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16749 dev_priv->display.get_initial_plane_config =
16750 i9xx_get_initial_plane_config;
16751 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
16752 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16753 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16755 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16756 dev_priv->display.get_initial_plane_config =
16757 i9xx_get_initial_plane_config;
16758 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
16759 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16760 dev_priv->display.crtc_disable = i9xx_crtc_disable;
/* FDI link training exists only on ILK through BDW. */
16763 if (IS_GEN(dev_priv, 5)) {
16764 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
16765 } else if (IS_GEN(dev_priv, 6)) {
16766 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
16767 } else if (IS_IVYBRIDGE(dev_priv)) {
16768 /* FIXME: detect B0+ stepping and use auto training */
16769 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
16770 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
16771 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
16774 if (INTEL_GEN(dev_priv) >= 9)
16775 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
16777 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
/*
 * intel_modeset_init_hw - read the current CDCLK state from hardware
 * and seed the logical/actual software copies from it, so subsequent
 * atomic computation starts from what the hardware is really running.
 */
16781 void intel_modeset_init_hw(struct drm_i915_private *i915)
16783 intel_update_cdclk(i915);
16784 intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
16785 i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
16789 * Calculate what we think the watermarks should be for the state we've read
16790 * out of the hardware and then immediately program those watermarks so that
16791 * we ensure the hardware settings match our internal state.
16793 * We can calculate what we think WM's should be by creating a duplicate of the
16794 * current state (which was constructed during hardware readout) and running it
16795 * through the atomic check code to calculate new watermark values in the
/*
 * Only runs on platforms with .optimize_watermarks. Takes all modeset
 * locks with the standard -EDEADLK backoff-and-retry dance (retry label
 * elided in this extract), duplicates the readout state, runs the atomic
 * check on it, and writes the computed watermarks back.
 */
16798 static void sanitize_watermarks(struct drm_device *dev)
16800 struct drm_i915_private *dev_priv = to_i915(dev);
16801 struct drm_atomic_state *state;
16802 struct intel_atomic_state *intel_state;
16803 struct intel_crtc *crtc;
16804 struct intel_crtc_state *crtc_state;
16805 struct drm_modeset_acquire_ctx ctx;
16809 /* Only supported on platforms that use atomic watermark design */
16810 if (!dev_priv->display.optimize_watermarks)
16814 * We need to hold connection_mutex before calling duplicate_state so
16815 * that the connector loop is protected.
16817 drm_modeset_acquire_init(&ctx, 0);
16819 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16820 if (ret == -EDEADLK) {
16821 drm_modeset_backoff(&ctx);
16823 } else if (WARN_ON(ret)) {
16827 state = drm_atomic_helper_duplicate_state(dev, &ctx);
16828 if (WARN_ON(IS_ERR(state)))
16831 intel_state = to_intel_atomic_state(state);
16834 * Hardware readout is the only time we don't want to calculate
16835 * intermediate watermarks (since we don't trust the current
16838 if (!HAS_GMCH(dev_priv))
16839 intel_state->skip_intermediate_wm = true;
16841 ret = intel_atomic_check(dev, state);
16844 * If we fail here, it means that the hardware appears to be
16845 * programmed in a way that shouldn't be possible, given our
16846 * understanding of watermark requirements. This might mean a
16847 * mistake in the hardware readout code or a mistake in the
16848 * watermark calculations for a given platform. Raise a WARN
16849 * so that this is noticeable.
16851 * If this actually happens, we'll have to just leave the
16852 * BIOS-programmed watermarks untouched and hope for the best.
16854 WARN(true, "Could not determine valid watermarks for inherited state\n");
16858 /* Write calculated watermark values back */
16859 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
16860 crtc_state->wm.need_postvbl_update = true;
16861 dev_priv->display.optimize_watermarks(intel_state, crtc_state);
/* Mirror the optimized WM into the committed CRTC state as well. */
16863 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
16867 drm_atomic_state_put(state);
16869 drm_modeset_drop_locks(&ctx);
16870 drm_modeset_acquire_fini(&ctx);
/*
 * intel_update_fdi_pll_freq - cache the FDI PLL frequency: on ILK it is
 * derived from the BIOS-programmed divider register; on SNB/IVB it is a
 * fixed 270 MHz; otherwise 0 (the else arm is elided in this extract).
 */
16873 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16875 if (IS_GEN(dev_priv, 5)) {
16877 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16879 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16880 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16881 dev_priv->fdi_pll_freq = 270000;
16886 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
/*
 * intel_initial_commit - commit a no-op-ish atomic state over the
 * configuration inherited from the BIOS, forcing plane/LUT state into a
 * known-good shape on active CRTCs. Uses the standard -EDEADLK backoff
 * retry loop (retry label elided in this extract). Returns 0 or errno.
 */
16889 static int intel_initial_commit(struct drm_device *dev)
16891 struct drm_atomic_state *state = NULL;
16892 struct drm_modeset_acquire_ctx ctx;
16893 struct intel_crtc *crtc;
16896 state = drm_atomic_state_alloc(dev);
16900 drm_modeset_acquire_init(&ctx, 0);
16903 state->acquire_ctx = &ctx;
16905 for_each_intel_crtc(dev, crtc) {
16906 struct intel_crtc_state *crtc_state =
16907 intel_atomic_get_crtc_state(state, crtc);
16909 if (IS_ERR(crtc_state)) {
16910 ret = PTR_ERR(crtc_state);
16914 if (crtc_state->hw.active) {
16915 ret = drm_atomic_add_affected_planes(state, &crtc->base);
16920 * FIXME hack to force a LUT update to avoid the
16921 * plane update forcing the pipe gamma on without
16922 * having a proper LUT loaded. Remove once we
16923 * have readout for pipe gamma enable.
16925 crtc_state->uapi.color_mgmt_changed = true;
16929 ret = drm_atomic_commit(state);
16932 if (ret == -EDEADLK) {
16933 drm_atomic_state_clear(state);
16934 drm_modeset_backoff(&ctx);
16938 drm_atomic_state_put(state);
16940 drm_modeset_drop_locks(&ctx);
16941 drm_modeset_acquire_fini(&ctx);
/*
 * intel_mode_config_init - initialize the drm_mode_config for this
 * device: global limits (fb size per generation, cursor size per
 * platform), preferred depth, and the intel_mode_funcs vtable.
 */
16946 static void intel_mode_config_init(struct drm_i915_private *i915)
16948 struct drm_mode_config *mode_config = &i915->drm.mode_config;
16950 drm_mode_config_init(&i915->drm);
16952 mode_config->min_width = 0;
16953 mode_config->min_height = 0;
16955 mode_config->preferred_depth = 24;
16956 mode_config->prefer_shadow = 1;
16958 mode_config->allow_fb_modifiers = true;
16960 mode_config->funcs = &intel_mode_funcs;
16963 * Maximum framebuffer dimensions, chosen to match
16964 * the maximum render engine surface size on gen4+.
16966 if (INTEL_GEN(i915) >= 7) {
16967 mode_config->max_width = 16384;
16968 mode_config->max_height = 16384;
16969 } else if (INTEL_GEN(i915) >= 4) {
16970 mode_config->max_width = 8192;
16971 mode_config->max_height = 8192;
16972 } else if (IS_GEN(i915, 3)) {
16973 mode_config->max_width = 4096;
16974 mode_config->max_height = 4096;
16976 mode_config->max_width = 2048;
16977 mode_config->max_height = 2048;
/* 845/865 have a wide, short cursor; other gen2 64x64; modern 256x256. */
16980 if (IS_I845G(i915) || IS_I865G(i915)) {
16981 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
16982 mode_config->cursor_height = 1023;
16983 } else if (IS_GEN(i915, 2)) {
16984 mode_config->cursor_width = 64;
16985 mode_config->cursor_height = 64;
16987 mode_config->cursor_width = 256;
16988 mode_config->cursor_height = 256;
/*
 * intel_modeset_init - one-time display/modeset bring-up at driver load.
 *
 * Allocates the modeset/flip workqueues, initializes mode_config, PM,
 * quirks, FBC and GMBUS, creates a CRTC per hardware pipe, reads out the
 * BIOS-programmed hardware state, reserves any BIOS framebuffer, then
 * sanitizes watermarks and performs an initial atomic commit.
 *
 * Returns 0 on success or a negative errno (error paths are partially
 * elided in this extract - interior numbering has gaps).
 */
16992 int intel_modeset_init(struct drm_i915_private *i915)
16994 struct drm_device *dev = &i915->drm;
16996 struct intel_crtc *crtc;
/* ordered wq: modeset commits must not be reordered; flips want low latency */
16999 i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
17000 i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
17001 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
17003 intel_mode_config_init(i915);
17005 ret = intel_bw_init(i915);
17009 init_llist_head(&i915->atomic_helper.free_list);
17010 INIT_WORK(&i915->atomic_helper.free_work,
17011 intel_atomic_helper_free_state_worker);
17013 intel_init_quirks(i915);
17015 intel_fbc_init(i915);
17017 intel_init_pm(i915);
17019 intel_panel_sanitize_ssc(i915);
17021 intel_gmbus_setup(i915);
17023 DRM_DEBUG_KMS("%d display pipe%s available.\n",
17024 INTEL_NUM_PIPES(i915),
17025 INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
/* Only create CRTCs when the display is physically present and enabled */
17027 if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
17028 for_each_pipe(i915, pipe) {
17029 ret = intel_crtc_init(i915, pipe);
17031 drm_mode_config_cleanup(dev);
17037 intel_shared_dpll_init(dev);
17038 intel_update_fdi_pll_freq(i915);
17040 intel_update_czclk(i915);
17041 intel_modeset_init_hw(i915);
17043 intel_hdcp_component_init(i915);
17045 if (i915->max_cdclk_freq == 0)
17046 intel_update_max_cdclk(i915);
17048 /* Just disable it once at startup */
17049 intel_vga_disable(i915);
17050 intel_setup_outputs(i915);
/* Take over whatever state the BIOS left the hardware in */
17052 drm_modeset_lock_all(dev);
17053 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
17054 drm_modeset_unlock_all(dev);
17056 for_each_intel_crtc(dev, crtc) {
17057 struct intel_initial_plane_config plane_config = {};
17063 * Note that reserving the BIOS fb up front prevents us
17064 * from stuffing other stolen allocations like the ring
17065 * on top. This prevents some ugliness at boot time, and
17066 * can even allow for smooth boot transitions if the BIOS
17067 * fb is large enough for the active pipe configuration.
17069 i915->display.get_initial_plane_config(crtc, &plane_config);
17072 * If the fb is shared between multiple heads, we'll
17073 * just get the first one.
17075 intel_find_initial_plane_obj(crtc, &plane_config);
17079 * Make sure hardware watermarks really match the state we read out.
17080 * Note that we need to do this after reconstructing the BIOS fb's
17081 * since the watermark calculation done here will use pstate->fb.
17083 if (!HAS_GMCH(i915))
17084 sanitize_watermarks(dev);
17087 * Force all active planes to recompute their states. So that on
17088 * mode_setcrtc after probe, all the intel_plane_state variables
17089 * are already calculated and there is no assert_plane warnings
17092 ret = intel_initial_commit(dev);
17094 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
/*
 * i830_enable_pipe - force-enable a pipe with fixed 640x480@60 timings.
 *
 * Quirk path for i830: some configurations require a pipe to always be
 * running.  Programs a DPLL for a ~25.175 MHz dot clock (calculated value
 * comes out at 25154 kHz, hence the WARN_ON tolerance check) and standard
 * VGA 640x480 CRT timings, then enables the pipe.
 *
 * The register write ORDER below is hardware-mandated; do not reorder.
 */
17099 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
17101 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17102 /* 640x480@60Hz, ~25175 kHz */
17103 struct dpll clock = {
17113 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
17115 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
17116 pipe_name(pipe), clock.vco, clock.dot);
17118 fp = i9xx_dpll_compute_fp(&clock);
17119 dpll = DPLL_DVO_2X_MODE |
17120 DPLL_VGA_MODE_DIS |
17121 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
17122 PLL_P2_DIVIDE_BY_4 |
17123 PLL_REF_INPUT_DREFCLK |
17126 I915_WRITE(FP0(pipe), fp);
17127 I915_WRITE(FP1(pipe), fp);
/* Classic VGA 640x480 CRT timings (htotal 800, vtotal 525) */
17129 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
17130 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
17131 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
17132 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
17133 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
17134 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
17135 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
17138 * Apparently we need to have VGA mode enabled prior to changing
17139 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
17140 * dividers, even though the register value does change.
17142 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
17143 I915_WRITE(DPLL(pipe), dpll);
17145 /* Wait for the clocks to stabilize. */
17146 POSTING_READ(DPLL(pipe));
17149 /* The pixel multiplier can only be updated once the
17150 * DPLL is enabled and the clocks are stable.
17152 * So write it again.
17154 I915_WRITE(DPLL(pipe), dpll);
17156 /* We do this three times for luck */
17157 for (i = 0; i < 3 ; i++) {
17158 I915_WRITE(DPLL(pipe), dpll);
17159 POSTING_READ(DPLL(pipe));
17160 udelay(150); /* wait for warmup */
17163 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
17164 POSTING_READ(PIPECONF(pipe));
/* Confirm the pipe is actually scanning out before returning */
17166 intel_wait_for_pipe_scanline_moving(crtc);
/*
 * i830_disable_pipe - counterpart to i830_enable_pipe's force quirk.
 *
 * Asserts (via WARN_ON) that no plane or cursor is still scanning out of
 * the pipe, disables PIPECONF, waits for the scanline to stop, and shuts
 * the DPLL back down to its VGA-disabled idle value.
 */
17169 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
17171 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17173 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
/* Nothing should still be feeding this pipe */
17176 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
17177 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
17178 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
17179 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
17180 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
17182 I915_WRITE(PIPECONF(pipe), 0);
17183 POSTING_READ(PIPECONF(pipe));
17185 intel_wait_for_pipe_scanline_stopped(crtc);
17187 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
17188 POSTING_READ(DPLL(pipe));
/*
 * intel_sanitize_plane_mapping - fix BIOS plane/pipe cross-wiring (pre-gen4).
 *
 * On gen3 and earlier the BIOS may attach a primary plane to a different
 * pipe than the one its CRTC owns.  Read each primary plane's hw state and
 * disable any plane found on the wrong pipe.  Gen4+ has fixed mappings, so
 * this is a no-op there (early return elided in this extract).
 */
17192 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17194 struct intel_crtc *crtc;
17196 if (INTEL_GEN(dev_priv) >= 4)
17199 for_each_intel_crtc(&dev_priv->drm, crtc) {
17200 struct intel_plane *plane =
17201 to_intel_plane(crtc->base.primary);
17202 struct intel_crtc *plane_crtc;
/* Skip planes that are off, or attached where they belong */
17205 if (!plane->get_hw_state(plane, &pipe))
17208 if (pipe == crtc->pipe)
17211 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17212 plane->base.base.id, plane->base.name);
/* Disable the plane on the pipe it is ACTUALLY attached to */
17214 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17215 intel_plane_disable_noatomic(plane_crtc, plane);
/*
 * intel_crtc_has_encoders - true if any encoder is attached to this CRTC.
 * (The return statements are elided in this extract: presumably returns
 * true from inside the loop, false after it - TODO confirm against source.)
 */
17219 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17221 struct drm_device *dev = crtc->base.dev;
17222 struct intel_encoder *encoder;
17224 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
/*
 * intel_encoder_find_connector - first connector attached to @encoder,
 * or NULL if none.  (Return statements elided in this extract -
 * presumably returns the iterated connector / NULL - TODO confirm.)
 */
17230 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17232 struct drm_device *dev = encoder->base.dev;
17233 struct intel_connector *connector;
17235 for_each_connector_on_encoder(dev, &encoder->base, connector)
/*
 * has_pch_trancoder - does a PCH transcoder exist for @pch_transcoder?
 * IBX/CPT have one per pipe; LPT-H only has transcoder A.
 * (Name keeps the historical "trancoder" typo used at call sites.)
 */
17241 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17242 enum pipe pch_transcoder)
17244 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17245 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
/*
 * intel_sanitize_frame_start_delay - clear BIOS-set frame start delays.
 *
 * The BIOS may leave a non-zero frame start delay (a debug feature) in the
 * transcoder/pipe registers; force it back to 0 everywhere it can live:
 * CHICKEN_TRANS on HSW/BDW/gen9+ (except DSI transcoders, which have no
 * such register), PIPECONF on older platforms, and additionally the PCH
 * transcoder registers when a PCH encoder is in use.
 */
17248 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
17250 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
17251 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
17252 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
17254 if (INTEL_GEN(dev_priv) >= 9 ||
17255 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
17256 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
/* DSI transcoders have no CHICKEN_TRANS register */
17259 if (transcoder_is_dsi(cpu_transcoder))
17262 val = I915_READ(reg);
17263 val &= ~HSW_FRAME_START_DELAY_MASK;
17264 val |= HSW_FRAME_START_DELAY(0);
17265 I915_WRITE(reg, val);
17267 i915_reg_t reg = PIPECONF(cpu_transcoder);
17270 val = I915_READ(reg);
17271 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
17272 val |= PIPECONF_FRAME_START_DELAY(0);
17273 I915_WRITE(reg, val);
/* The rest only applies when the pipe drives a PCH transcoder */
17276 if (!crtc_state->has_pch_encoder)
17279 if (HAS_PCH_IBX(dev_priv)) {
17280 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
17283 val = I915_READ(reg);
17284 val &= ~TRANS_FRAME_START_DELAY_MASK;
17285 val |= TRANS_FRAME_START_DELAY(0);
17286 I915_WRITE(reg, val);
17288 i915_reg_t reg = TRANS_CHICKEN2(crtc->pipe);
17291 val = I915_READ(reg);
17292 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
17293 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
17294 I915_WRITE(reg, val);
/*
 * intel_sanitize_crtc - bring one CRTC's BIOS-inherited state into a shape
 * the driver can take over.
 *
 * For active pipes: clear frame start delays, turn off every non-primary
 * plane and any BIOS-set background color, and disable the pipe entirely
 * if it has no attached encoders.  Also marks FIFO underrun reporting as
 * disabled for bookkeeping (see the comments inline).
 */
17298 static void intel_sanitize_crtc(struct intel_crtc *crtc,
17299 struct drm_modeset_acquire_ctx *ctx)
17301 struct drm_device *dev = crtc->base.dev;
17302 struct drm_i915_private *dev_priv = to_i915(dev);
17303 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
17305 if (crtc_state->hw.active) {
17306 struct intel_plane *plane;
17308 /* Clear any frame start delays used for debugging left by the BIOS */
17309 intel_sanitize_frame_start_delay(crtc_state);
17311 /* Disable everything but the primary plane */
17312 for_each_intel_plane_on_crtc(dev, crtc, plane) {
17313 const struct intel_plane_state *plane_state =
17314 to_intel_plane_state(plane->base.state);
17316 if (plane_state->uapi.visible &&
17317 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
17318 intel_plane_disable_noatomic(crtc, plane);
17322 * Disable any background color set by the BIOS, but enable the
17323 * gamma and CSC to match how we program our planes.
17325 if (INTEL_GEN(dev_priv) >= 9)
17326 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
17327 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
17328 SKL_BOTTOM_COLOR_CSC_ENABLE);
17331 /* Adjust the state of the output pipe according to whether we
17332 * have active connectors/encoders. */
17333 if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
17334 intel_crtc_disable_noatomic(&crtc->base, ctx);
17336 if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
17338 * We start out with underrun reporting disabled to avoid races.
17339 * For correct bookkeeping mark this on active crtcs.
17341 * Also on gmch platforms we dont have any hardware bits to
17342 * disable the underrun reporting. Which means we need to start
17343 * out with underrun reporting disabled also on inactive pipes,
17344 * since otherwise we'll complain about the garbage we read when
17345 * e.g. coming up after runtime pm.
17347 * No protection against concurrent access is required - at
17348 * worst a fifo underrun happens which also sets this to false.
17350 crtc->cpu_fifo_underrun_disabled = true;
17352 * We track the PCH trancoder underrun reporting state
17353 * within the crtc. With crtc for pipe A housing the underrun
17354 * reporting state for PCH transcoder A, crtc for pipe B housing
17355 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
17356 * and marking underrun reporting as disabled for the non-existing
17357 * PCH transcoders B and C would prevent enabling the south
17358 * error interrupt (see cpt_can_enable_serr_int()).
17360 if (has_pch_trancoder(dev_priv, crtc->pipe))
17361 crtc->pch_fifo_underrun_disabled = true;
/*
 * has_bogus_dpll_config - detect SNB BIOS DPLL misprogramming.
 *
 * Returns true when a gen6 BIOS left an active pipe with a shared DPLL
 * but a zero port clock - a known broken combination that warrants
 * disabling the pipe outright (see inline comment for the affected BIOS).
 */
17365 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17367 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17370 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17371 * the hardware when a high res displays plugged in. DPLL P
17372 * divider is zero, and the pipe timings are bonkers. We'll
17373 * try to disable everything in that case.
17375 * FIXME would be nice to be able to sanitize this state
17376 * without several WARNs, but for now let's take the easy
17379 return IS_GEN(dev_priv, 6) &&
17380 crtc_state->hw.active &&
17381 crtc_state->shared_dpll &&
17382 crtc_state->port_clock == 0;
/*
 * intel_sanitize_encoder - reconcile an encoder's software state with the
 * hardware state inherited from the BIOS/resume.
 *
 * If the encoder's connector is active but its pipe is not (or the DPLL
 * config is bogus), manually run the encoder's disable hooks and clamp
 * the connector to DPMS_OFF, then notify opregion of the result.  Gen11+
 * additionally sanitizes the encoder's PLL mapping.
 */
17385 static void intel_sanitize_encoder(struct intel_encoder *encoder)
17387 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
17388 struct intel_connector *connector;
17389 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
17390 struct intel_crtc_state *crtc_state = crtc ?
17391 to_intel_crtc_state(crtc->base.state) : NULL;
17393 /* We need to check both for a crtc link (meaning that the
17394 * encoder is active and trying to read from a pipe) and the
17395 * pipe itself being active. */
17396 bool has_active_crtc = crtc_state &&
17397 crtc_state->hw.active;
17399 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
17400 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
17401 pipe_name(crtc->pipe));
/* Treat the pipe as dead so the encoder gets manually disabled below */
17402 has_active_crtc = false;
17405 connector = intel_encoder_find_connector(encoder);
17406 if (connector && !has_active_crtc) {
17407 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
17408 encoder->base.base.id,
17409 encoder->base.name);
17411 /* Connector is active, but has no active pipe. This is
17412 * fallout from our resume register restoring. Disable
17413 * the encoder manually again. */
17415 struct drm_encoder *best_encoder;
17417 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
17418 encoder->base.base.id,
17419 encoder->base.name);
17421 /* avoid oopsing in case the hooks consult best_encoder */
17422 best_encoder = connector->base.state->best_encoder;
17423 connector->base.state->best_encoder = &encoder->base;
17425 if (encoder->disable)
17426 encoder->disable(encoder, crtc_state,
17427 connector->base.state);
17428 if (encoder->post_disable)
17429 encoder->post_disable(encoder, crtc_state,
17430 connector->base.state);
/* Restore the temporarily swapped best_encoder */
17432 connector->base.state->best_encoder = best_encoder;
17434 encoder->base.crtc = NULL;
17436 /* Inconsistent output/port/pipe state happens presumably due to
17437 * a bug in one of the get_hw_state functions. Or someplace else
17438 * in our code, like the register restore mess on resume. Clamp
17439 * things to off as a safer default. */
17441 connector->base.dpms = DRM_MODE_DPMS_OFF;
17442 connector->base.encoder = NULL;
17445 /* notify opregion of the sanitized encoder state */
17446 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
17448 if (INTEL_GEN(dev_priv) >= 11)
17449 icl_sanitize_encoder_pll_mapping(encoder);
/* FIXME read out full plane state for all planes */
/*
 * readout_plane_state - read each plane's enable/pipe state from hardware
 * and record visibility in the owning CRTC's state, then let
 * fixup_active_planes() reconcile each CRTC's active-plane bitmask.
 */
17453 static void readout_plane_state(struct drm_i915_private *dev_priv)
17455 struct intel_plane *plane;
17456 struct intel_crtc *crtc;
17458 for_each_intel_plane(&dev_priv->drm, plane) {
17459 struct intel_plane_state *plane_state =
17460 to_intel_plane_state(plane->base.state);
17461 struct intel_crtc_state *crtc_state;
/* Default pipe for planes whose hw state reports disabled */
17462 enum pipe pipe = PIPE_A;
17465 visible = plane->get_hw_state(plane, &pipe);
17467 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17468 crtc_state = to_intel_crtc_state(crtc->base.state);
17470 intel_set_plane_visible(crtc_state, plane_state, visible);
17472 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
17473 plane->base.base.id, plane->base.name,
17474 enableddisabled(visible), pipe_name(pipe));
17477 for_each_intel_crtc(&dev_priv->drm, crtc) {
17478 struct intel_crtc_state *crtc_state =
17479 to_intel_crtc_state(crtc->base.state);
17481 fixup_active_planes(crtc_state);
/*
 * intel_modeset_readout_hw_state - reconstruct the full software modeset
 * state from the current hardware state.
 *
 * Walks, in order: CRTCs (reset state, read pipe config), planes, shared
 * DPLLs (hw state + crtc_mask), encoders (crtc link + config), connectors
 * (dpms/encoder/connector masks), and finally per-CRTC derived state
 * (modes, pixel rate, per-plane data rate / min cdclk, bandwidth, voltage
 * level).  After this the software state mirrors whatever the BIOS (or a
 * previous driver instance) programmed.
 */
17485 static void intel_modeset_readout_hw_state(struct drm_device *dev)
17487 struct drm_i915_private *dev_priv = to_i915(dev);
17489 struct intel_crtc *crtc;
17490 struct intel_encoder *encoder;
17491 struct intel_connector *connector;
17492 struct drm_connector_list_iter conn_iter;
17495 dev_priv->active_pipes = 0;
/* Pass 1: CRTCs - throw away stale state and read the pipe config */
17497 for_each_intel_crtc(dev, crtc) {
17498 struct intel_crtc_state *crtc_state =
17499 to_intel_crtc_state(crtc->base.state);
17501 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
17502 intel_crtc_free_hw_state(crtc_state);
17503 memset(crtc_state, 0, sizeof(*crtc_state));
17504 __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->uapi);
17506 crtc_state->hw.active = crtc_state->hw.enable =
17507 dev_priv->display.get_pipe_config(crtc, crtc_state);
17509 crtc->base.enabled = crtc_state->hw.enable;
17510 crtc->active = crtc_state->hw.active;
17512 if (crtc_state->hw.active)
17513 dev_priv->active_pipes |= BIT(crtc->pipe);
17515 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
17516 crtc->base.base.id, crtc->base.name,
17517 enableddisabled(crtc_state->hw.active));
/* Pass 2: planes */
17520 readout_plane_state(dev_priv);
/* Pass 3: shared DPLLs - hw state plus which CRTCs use each PLL */
17522 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
17523 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
17525 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
17526 &pll->state.hw_state);
/* EHL DPLL4 needs a DC-off power reference while it is on */
17528 if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
17529 pll->info->id == DPLL_ID_EHL_DPLL4) {
17530 pll->wakeref = intel_display_power_get(dev_priv,
17531 POWER_DOMAIN_DPLL_DC_OFF);
17534 pll->state.crtc_mask = 0;
17535 for_each_intel_crtc(dev, crtc) {
17536 struct intel_crtc_state *crtc_state =
17537 to_intel_crtc_state(crtc->base.state);
17539 if (crtc_state->hw.active &&
17540 crtc_state->shared_dpll == pll)
17541 pll->state.crtc_mask |= 1 << crtc->pipe;
17543 pll->active_mask = pll->state.crtc_mask;
17545 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
17546 pll->info->name, pll->state.crtc_mask, pll->on);
/* Pass 4: encoders - link to the CRTC each one is driving */
17549 for_each_intel_encoder(dev, encoder) {
17552 if (encoder->get_hw_state(encoder, &pipe)) {
17553 struct intel_crtc_state *crtc_state;
17555 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17556 crtc_state = to_intel_crtc_state(crtc->base.state);
17558 encoder->base.crtc = &crtc->base;
17559 encoder->get_config(encoder, crtc_state);
17561 encoder->base.crtc = NULL;
17564 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
17565 encoder->base.base.id, encoder->base.name,
17566 enableddisabled(encoder->base.crtc),
/* Pass 5: connectors - dpms state and connector/encoder masks */
17570 drm_connector_list_iter_begin(dev, &conn_iter);
17571 for_each_intel_connector_iter(connector, &conn_iter) {
17572 if (connector->get_hw_state(connector)) {
17573 struct intel_crtc_state *crtc_state;
17574 struct intel_crtc *crtc;
17576 connector->base.dpms = DRM_MODE_DPMS_ON;
17578 encoder = connector->encoder;
17579 connector->base.encoder = &encoder->base;
17581 crtc = to_intel_crtc(encoder->base.crtc);
17582 crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
17584 if (crtc_state && crtc_state->hw.active) {
17586 * This has to be done during hardware readout
17587 * because anything calling .crtc_disable may
17588 * rely on the connector_mask being accurate.
17590 crtc_state->uapi.connector_mask |=
17591 drm_connector_mask(&connector->base);
17592 crtc_state->uapi.encoder_mask |=
17593 drm_encoder_mask(&encoder->base);
17596 connector->base.dpms = DRM_MODE_DPMS_OFF;
17597 connector->base.encoder = NULL;
17599 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
17600 connector->base.base.id, connector->base.name,
17601 enableddisabled(connector->base.encoder));
17603 drm_connector_list_iter_end(&conn_iter);
/* Pass 6: per-CRTC derived state (modes, rates, bandwidth, voltage) */
17605 for_each_intel_crtc(dev, crtc) {
17606 struct intel_bw_state *bw_state =
17607 to_intel_bw_state(dev_priv->bw_obj.state);
17608 struct intel_crtc_state *crtc_state =
17609 to_intel_crtc_state(crtc->base.state);
17610 struct intel_plane *plane;
17613 if (crtc_state->hw.active) {
17614 struct drm_display_mode *mode = &crtc_state->hw.mode;
17616 intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
17619 *mode = crtc_state->hw.adjusted_mode;
17620 mode->hdisplay = crtc_state->pipe_src_w;
17621 mode->vdisplay = crtc_state->pipe_src_h;
17624 * The initial mode needs to be set in order to keep
17625 * the atomic core happy. It wants a valid mode if the
17626 * crtc's enabled, so we do the above call.
17628 * But we don't set all the derived state fully, hence
17629 * set a flag to indicate that a full recalculation is
17630 * needed on the next commit.
17632 mode->private_flags = I915_MODE_FLAG_INHERITED;
17634 intel_crtc_compute_pixel_rate(crtc_state);
17636 intel_crtc_update_active_timings(crtc_state);
17638 intel_crtc_copy_hw_to_uapi_state(crtc_state);
17641 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
17642 const struct intel_plane_state *plane_state =
17643 to_intel_plane_state(plane->base.state);
17646 * FIXME don't have the fb yet, so can't
17647 * use intel_plane_data_rate() :(
17649 if (plane_state->uapi.visible)
17650 crtc_state->data_rate[plane->id] =
17651 4 * crtc_state->pixel_rate;
17653 * FIXME don't have the fb yet, so can't
17654 * use plane->min_cdclk() :(
17656 if (plane_state->uapi.visible && plane->min_cdclk) {
17657 if (crtc_state->double_wide ||
17658 INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
17659 crtc_state->min_cdclk[plane->id] =
17660 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
17662 crtc_state->min_cdclk[plane->id] =
17663 crtc_state->pixel_rate;
17665 DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
17666 plane->base.base.id, plane->base.name,
17667 crtc_state->min_cdclk[plane->id]);
17670 if (crtc_state->hw.active) {
17671 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
17672 if (WARN_ON(min_cdclk < 0))
17676 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
17677 dev_priv->min_voltage_level[crtc->pipe] =
17678 crtc_state->min_voltage_level;
17680 intel_bw_crtc_update(bw_state, crtc_state);
/* Cross-check the read-out config for internal consistency */
17682 intel_pipe_config_sanity_check(dev_priv, crtc_state);
/*
 * get_encoder_power_domains - let each active encoder grab the power
 * domain references its read-out state requires.  Encoders without a
 * get_power_domains hook, or without an attached CRTC (MST-primary and
 * inactive encoders), are skipped.
 */
17687 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17689 struct intel_encoder *encoder;
17691 for_each_intel_encoder(&dev_priv->drm, encoder) {
17692 struct intel_crtc_state *crtc_state;
17694 if (!encoder->get_power_domains)
17698 * MST-primary and inactive encoders don't have a crtc state
17699 * and neither of these require any power domain references.
17701 if (!encoder->base.crtc)
17704 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17705 encoder->get_power_domains(encoder, crtc_state);
/*
 * intel_early_display_was - apply display workarounds that must be in
 * place before any other display programming (clock gating on CNL/GLK,
 * package C-state arbiter fix on HSW).
 */
17709 static void intel_early_display_was(struct drm_i915_private *dev_priv)
17711 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
17712 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
17713 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
17716 if (IS_HASWELL(dev_priv)) {
17718 * WaRsPkgCStateDisplayPMReq:hsw
17719 * System hang if this isn't done before disabling all planes!
17721 I915_WRITE(CHICKEN_PAR1_1,
17722 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
/*
 * ibx_sanitize_pch_hdmi_port - force a disabled PCH HDMI port's
 * transcoder select back to pipe A.  No-op if the port is enabled or
 * already selects pipe A (early return elided in this extract).
 */
17726 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17727 enum port port, i915_reg_t hdmi_reg)
17729 u32 val = I915_READ(hdmi_reg);
17731 if (val & SDVO_ENABLE ||
17732 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17735 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
17738 val &= ~SDVO_PIPE_SEL_MASK;
17739 val |= SDVO_PIPE_SEL(PIPE_A);
17741 I915_WRITE(hdmi_reg, val);
/*
 * ibx_sanitize_pch_dp_port - DP counterpart of
 * ibx_sanitize_pch_hdmi_port(): force a disabled PCH DP port's pipe
 * select back to pipe A.
 */
17744 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17745 enum port port, i915_reg_t dp_reg)
17747 u32 val = I915_READ(dp_reg);
17749 if (val & DP_PORT_EN ||
17750 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17753 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
17756 val &= ~DP_PIPE_SEL_MASK;
17757 val |= DP_PIPE_SEL(PIPE_A);
17759 I915_WRITE(dp_reg, val);
/*
 * ibx_sanitize_pch_ports - sanitize transcoder select bits on all PCH
 * DP and HDMI/SDVO ports; see the inline comment for why the BIOS can
 * leave these pointing at transcoder B on disabled ports.
 */
17762 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
17765 * The BIOS may select transcoder B on some of the PCH
17766 * ports even it doesn't enable the port. This would trip
17767 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
17768 * Sanitize the transcoder select bits to prevent that. We
17769 * assume that the BIOS never actually enabled the port,
17770 * because if it did we'd actually have to toggle the port
17771 * on and back off to make the transcoder A select stick
17772 * (see. intel_dp_link_down(), intel_disable_hdmi(),
17773 * intel_disable_sdvo()).
17775 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
17776 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
17777 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
17779 /* PCH SDVOB multiplex with HDMIB */
17780 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
17781 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
17782 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
/*
 * Holds a POWER_DOMAIN_INIT wakeref for the whole sequence.  Order matters:
 * TypeC port sanitize must precede encoder readout; vblank interrupts must
 * be restored before plane-mapping sanitize (which may wait for vblanks);
 * encoder sanitize runs before CRTC sanitize; unused-but-on PLLs are
 * disabled last, then watermark state is read per platform family.
 */
17789 intel_modeset_setup_hw_state(struct drm_device *dev,
17790 struct drm_modeset_acquire_ctx *ctx)
17792 struct drm_i915_private *dev_priv = to_i915(dev);
17793 struct intel_crtc_state *crtc_state;
17794 struct intel_encoder *encoder;
17795 struct intel_crtc *crtc;
17796 intel_wakeref_t wakeref;
17799 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
17801 intel_early_display_was(dev_priv);
17802 intel_modeset_readout_hw_state(dev);
17804 /* HW state is read out, now we need to sanitize this mess. */
17806 /* Sanitize the TypeC port mode upfront, encoders depend on this */
17807 for_each_intel_encoder(dev, encoder) {
17808 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
17810 /* We need to sanitize only the MST primary port. */
17811 if (encoder->type != INTEL_OUTPUT_DP_MST &&
17812 intel_phy_is_tc(dev_priv, phy))
17813 intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
17816 get_encoder_power_domains(dev_priv);
17818 if (HAS_PCH_IBX(dev_priv))
17819 ibx_sanitize_pch_ports(dev_priv);
17822 * intel_sanitize_plane_mapping() may need to do vblank
17823 * waits, so we need vblank interrupts restored beforehand.
17825 for_each_intel_crtc(&dev_priv->drm, crtc) {
17826 crtc_state = to_intel_crtc_state(crtc->base.state);
17828 drm_crtc_vblank_reset(&crtc->base);
17830 if (crtc_state->hw.active)
17831 intel_crtc_vblank_on(crtc_state);
17834 intel_sanitize_plane_mapping(dev_priv);
17836 for_each_intel_encoder(dev, encoder)
17837 intel_sanitize_encoder(encoder);
17839 for_each_intel_crtc(&dev_priv->drm, crtc) {
17840 crtc_state = to_intel_crtc_state(crtc->base.state);
17841 intel_sanitize_crtc(crtc, ctx);
17842 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
17845 intel_modeset_update_connector_atomic_state(dev);
/* Power down PLLs the BIOS left running with no consumer */
17847 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
17848 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
17850 if (!pll->on || pll->active_mask)
17853 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
17856 pll->info->funcs->disable(dev_priv, pll);
/* Watermark readout differs per platform family */
17860 if (IS_G4X(dev_priv)) {
17861 g4x_wm_get_hw_state(dev_priv);
17862 g4x_wm_sanitize(dev_priv);
17863 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
17864 vlv_wm_get_hw_state(dev_priv);
17865 vlv_wm_sanitize(dev_priv);
17866 } else if (INTEL_GEN(dev_priv) >= 9) {
17867 skl_wm_get_hw_state(dev_priv);
17868 } else if (HAS_PCH_SPLIT(dev_priv)) {
17869 ilk_wm_get_hw_state(dev_priv);
/* No CRTC should need power domains here; WARN and drop any that do */
17872 for_each_intel_crtc(dev, crtc) {
17875 crtc_state = to_intel_crtc_state(crtc->base.state);
17876 put_domains = modeset_get_crtc_power_domains(crtc_state);
17877 if (WARN_ON(put_domains))
17878 modeset_put_power_domains(dev_priv, put_domains);
17881 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
17883 intel_fbc_init_pipe_state(dev_priv);
/*
 * intel_display_resume - restore the modeset state saved at suspend.
 *
 * Consumes (and clears) dev_priv->modeset_restore_state, taking all
 * modeset locks with the usual EDEADLK backoff-and-retry dance, then
 * commits the saved state via __intel_display_resume() and re-enables IPC.
 */
17886 void intel_display_resume(struct drm_device *dev)
17888 struct drm_i915_private *dev_priv = to_i915(dev);
17889 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
17890 struct drm_modeset_acquire_ctx ctx;
17893 dev_priv->modeset_restore_state = NULL;
17895 state->acquire_ctx = &ctx;
17897 drm_modeset_acquire_init(&ctx, 0);
/* Retry loop: back off and re-take all locks on deadlock */
17900 ret = drm_modeset_lock_all_ctx(dev, &ctx);
17901 if (ret != -EDEADLK)
17904 drm_modeset_backoff(&ctx);
17908 ret = __intel_display_resume(dev, state, &ctx);
17910 intel_enable_ipc(dev_priv);
17911 drm_modeset_drop_locks(&ctx);
17912 drm_modeset_acquire_fini(&ctx);
17915 DRM_ERROR("Restoring old state failed with %i\n", ret);
/* Drop the reference held since suspend */
17917 drm_atomic_state_put(state);
/*
 * intel_hpd_poll_fini - flush and cancel all per-connector work items
 * queued by hotplug handling (modeset retry work and, where present,
 * HDCP check/property work) before teardown proceeds.
 */
17920 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
17922 struct intel_connector *connector;
17923 struct drm_connector_list_iter conn_iter;
17925 /* Kill all the work that may have been queued by hpd. */
17926 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
17927 for_each_intel_connector_iter(connector, &conn_iter) {
17928 if (connector->modeset_retry_work.func)
17929 cancel_work_sync(&connector->modeset_retry_work);
17930 if (connector->hdcp.shim) {
17931 cancel_delayed_work_sync(&connector->hdcp.check_work);
17932 cancel_work_sync(&connector->hdcp.prop_work);
17935 drm_connector_list_iter_end(&conn_iter);
/*
 * intel_modeset_driver_remove - tear down the display side at unload.
 *
 * The teardown ORDER is deliberate and documented inline: flush pending
 * work first, kill interrupts before touching connectors, stop hpd
 * polling before fbdev (poll work can call into fbdev), and only destroy
 * the workqueues after mode_config cleanup can no longer queue to them.
 */
17938 void intel_modeset_driver_remove(struct drm_i915_private *i915)
17940 flush_workqueue(i915->flip_wq);
17941 flush_workqueue(i915->modeset_wq);
17943 flush_work(&i915->atomic_helper.free_work);
17944 WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
17947 * Interrupts and polling as the first thing to avoid creating havoc.
17948 * Too much stuff here (turning of connectors, ...) would
17949 * experience fancy races otherwise.
17951 intel_irq_uninstall(i915);
17954 * Due to the hpd irq storm handling the hotplug work can re-arm the
17955 * poll handlers. Hence disable polling after hpd handling is shut down.
17957 intel_hpd_poll_fini(i915);
17959 /* poll work can call into fbdev, hence clean that up afterwards */
17960 intel_fbdev_fini(i915);
17962 intel_unregister_dsm_handler();
17964 intel_fbc_global_disable(i915);
17966 /* flush any delayed tasks or pending work */
17967 flush_scheduled_work();
17969 intel_hdcp_component_fini(i915);
17971 drm_mode_config_cleanup(&i915->drm);
17973 intel_overlay_cleanup(i915);
17975 intel_gmbus_teardown(i915);
17977 destroy_workqueue(i915->flip_wq);
17978 destroy_workqueue(i915->modeset_wq);
17980 intel_fbc_cleanup_cfb(i915);
17983 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * intel_display_error_state - snapshot of display registers captured at
 * GPU error time (power well, plus per-pipe cursor/pipe/plane state and
 * per-transcoder timing registers).  Register-value members are elided
 * in this extract (interior numbering gaps) - see the capture function
 * below for which registers each sub-struct records.
 */
17985 struct intel_display_error_state {
17987 u32 power_well_driver;
17989 struct intel_cursor_error_state {
17994 } cursor[I915_MAX_PIPES];
17996 struct intel_pipe_error_state {
/* Skip reading the rest of the pipe when its power domain is off */
17997 bool power_domain_on;
18000 } pipe[I915_MAX_PIPES];
18002 struct intel_plane_error_state {
18010 } plane[I915_MAX_PIPES];
18012 struct intel_transcoder_error_state {
18014 bool power_domain_on;
18015 enum transcoder cpu_transcoder;
/*
 * intel_display_capture_error_state - capture display register state for
 * the GPU error dump.
 *
 * Allocates with GFP_ATOMIC (may run from error-handling context) and
 * skips pipes/transcoders whose power domains are off, so reads never
 * touch unpowered hardware.  Returns NULL when there is no display or
 * allocation fails (some early returns elided in this extract).
 */
18028 struct intel_display_error_state *
18029 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
18031 struct intel_display_error_state *error;
18032 int transcoders[] = {
18041 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
18043 if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
18046 error = kzalloc(sizeof(*error), GFP_ATOMIC);
18050 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18051 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
18053 for_each_pipe(dev_priv, i) {
18054 error->pipe[i].power_domain_on =
18055 __intel_display_power_is_enabled(dev_priv,
18056 POWER_DOMAIN_PIPE(i));
/* Never read registers of a powered-down pipe */
18057 if (!error->pipe[i].power_domain_on)
18060 error->cursor[i].control = I915_READ(CURCNTR(i));
18061 error->cursor[i].position = I915_READ(CURPOS(i));
18062 error->cursor[i].base = I915_READ(CURBASE(i));
18064 error->plane[i].control = I915_READ(DSPCNTR(i));
18065 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
/* Register layout varies by generation; guard each read accordingly */
18066 if (INTEL_GEN(dev_priv) <= 3) {
18067 error->plane[i].size = I915_READ(DSPSIZE(i));
18068 error->plane[i].pos = I915_READ(DSPPOS(i));
18070 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18071 error->plane[i].addr = I915_READ(DSPADDR(i));
18072 if (INTEL_GEN(dev_priv) >= 4) {
18073 error->plane[i].surface = I915_READ(DSPSURF(i));
18074 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
18077 error->pipe[i].source = I915_READ(PIPESRC(i));
18079 if (HAS_GMCH(dev_priv))
18080 error->pipe[i].stat = I915_READ(PIPESTAT(i));
18083 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18084 enum transcoder cpu_transcoder = transcoders[i];
/* Transcoder absent on this platform if it has no MMIO offset */
18086 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
18089 error->transcoder[i].available = true;
18090 error->transcoder[i].power_domain_on =
18091 __intel_display_power_is_enabled(dev_priv,
18092 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
18093 if (!error->transcoder[i].power_domain_on)
18096 error->transcoder[i].cpu_transcoder = cpu_transcoder;
18098 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
18099 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
18100 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
18101 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
18102 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
18103 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
18104 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
/* Convenience wrapper: forward a printf-style format to the error-state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18113 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
18114 struct intel_display_error_state *error)
18116 struct drm_i915_private *dev_priv = m->i915;
18122 err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
18123 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18124 err_printf(m, "PWR_WELL_CTL2: %08x\n",
18125 error->power_well_driver);
18126 for_each_pipe(dev_priv, i) {
18127 err_printf(m, "Pipe [%d]:\n", i);
18128 err_printf(m, " Power: %s\n",
18129 onoff(error->pipe[i].power_domain_on));
18130 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
18131 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
18133 err_printf(m, "Plane [%d]:\n", i);
18134 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
18135 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
18136 if (INTEL_GEN(dev_priv) <= 3) {
18137 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
18138 err_printf(m, " POS: %08x\n", error->plane[i].pos);
18140 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18141 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
18142 if (INTEL_GEN(dev_priv) >= 4) {
18143 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
18144 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
18147 err_printf(m, "Cursor [%d]:\n", i);
18148 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
18149 err_printf(m, " POS: %08x\n", error->cursor[i].position);
18150 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
18153 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18154 if (!error->transcoder[i].available)
18157 err_printf(m, "CPU transcoder: %s\n",
18158 transcoder_name(error->transcoder[i].cpu_transcoder));
18159 err_printf(m, " Power: %s\n",
18160 onoff(error->transcoder[i].power_domain_on));
18161 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
18162 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
18163 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
18164 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
18165 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
18166 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
18167 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);