]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/i915/display/intel_display.c
drm/i915: Remove special case slave handling during hw programming, v3.
[linux.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "gt/intel_rps.h"
59
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_fbc.h"
70 #include "intel_fbdev.h"
71 #include "intel_fifo_underrun.h"
72 #include "intel_frontbuffer.h"
73 #include "intel_hdcp.h"
74 #include "intel_hotplug.h"
75 #include "intel_overlay.h"
76 #include "intel_pipe_crc.h"
77 #include "intel_pm.h"
78 #include "intel_psr.h"
79 #include "intel_quirks.h"
80 #include "intel_sideband.h"
81 #include "intel_sprite.h"
82 #include "intel_tc.h"
83 #include "intel_vga.h"
84
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_XBGR16161616F,
};

/* Framebuffer modifiers supported by pre-gen9 primary planes. */
static const u64 i9xx_format_modifiers[] = {
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};

/* Cursor planes only support linear (untiled) surfaces. */
static const u64 cursor_format_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};
129
130 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
131                                 struct intel_crtc_state *pipe_config);
132 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
133                                    struct intel_crtc_state *pipe_config);
134
135 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
136                                   struct drm_i915_gem_object *obj,
137                                   struct drm_mode_fb_cmd2 *mode_cmd);
138 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
139 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
140 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
141                                          const struct intel_link_m_n *m_n,
142                                          const struct intel_link_m_n *m2_n2);
143 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
144 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
145 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
146 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
147 static void vlv_prepare_pll(struct intel_crtc *crtc,
148                             const struct intel_crtc_state *pipe_config);
149 static void chv_prepare_pll(struct intel_crtc *crtc,
150                             const struct intel_crtc_state *pipe_config);
151 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
152                                     struct intel_crtc_state *crtc_state);
153 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
154 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
155 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
156 static void intel_modeset_setup_hw_state(struct drm_device *dev,
157                                          struct drm_modeset_acquire_ctx *ctx);
158 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
159
/*
 * Per-platform bounds on the DPLL divider values and derived clock
 * rates. Each {min, max} pair constrains one term of the PLL equation
 * (validated in intel_PLL_is_valid()); .p2 additionally carries the
 * dot-clock threshold used by i9xx_select_p2_div() to choose between
 * the slow and fast p2 post dividers.
 */
struct intel_limit {
        struct {
                int min, max;
        } dot, vco, n, m, m1, m2, p, p1;

        struct {
                int dot_limit;
                int p2_slow, p2_fast;
        } p2;
};
170
171 /* returns HPLL frequency in kHz */
172 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
173 {
174         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
175
176         /* Obtain SKU information */
177         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
178                 CCK_FUSE_HPLL_FREQ_MASK;
179
180         return vco_freq[hpll_freq] * 1000;
181 }
182
183 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
184                       const char *name, u32 reg, int ref_freq)
185 {
186         u32 val;
187         int divider;
188
189         val = vlv_cck_read(dev_priv, reg);
190         divider = val & CCK_FREQUENCY_VALUES;
191
192         WARN((val & CCK_FREQUENCY_STATUS) !=
193              (divider << CCK_FREQUENCY_STATUS_SHIFT),
194              "%s change in progress\n", name);
195
196         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
197 }
198
199 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
200                            const char *name, u32 reg)
201 {
202         int hpll;
203
204         vlv_cck_get(dev_priv);
205
206         if (dev_priv->hpll_freq == 0)
207                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
208
209         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
210
211         vlv_cck_put(dev_priv);
212
213         return hpll;
214 }
215
216 static void intel_update_czclk(struct drm_i915_private *dev_priv)
217 {
218         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
219                 return;
220
221         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
222                                                       CCK_CZ_CLOCK_CONTROL);
223
224         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
225 }
226
227 static inline u32 /* units of 100MHz */
228 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
229                     const struct intel_crtc_state *pipe_config)
230 {
231         if (HAS_DDI(dev_priv))
232                 return pipe_config->port_clock; /* SPLL */
233         else
234                 return dev_priv->fdi_pll_freq;
235 }
236
/*
 * Gen2 (i8xx) DPLL divider limits, one table per output type.
 * p2_slow applies below .dot_limit kHz and p2_fast above it, except
 * for LVDS where single/dual channel selects slow/fast instead
 * (see i9xx_select_p2_div()).
 */
static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};

/* Gen3/early-gen4 (i9xx) DPLL divider limits. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};
301
302
/* G4x DPLL divider limits. */
static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};

static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* .dot_limit = 0 => the fast/slow split never triggers; LVDS picks
 * p2 by single vs dual channel (see i9xx_select_p2_div()). */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};

/* Pineview DPLL divider limits (single combined m divider). */
static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};
386
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* Single-channel LVDS */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel LVDS */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};
457
static const struct intel_limit intel_limits_vlv = {
         /*
          * These are the data rate limits (measured in fast clocks)
          * since those are the strictest limits we have. The fast
          * clock and actual rate limits are more relaxed, so checking
          * them would make no difference.
          *
          * The *5 on .dot reflects fast clock = 5 * pixel clock
          * (vlv_calc_dpll_params() returns dot/5).
          */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5},
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* m2 is stored in .22 fixed point (see chv_calc_dpll_params()) */
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};
501
502 /* WA Display #0827: Gen9:all */
503 static void
504 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
505 {
506         if (enable)
507                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
508                            I915_READ(CLKGATE_DIS_PSL(pipe)) |
509                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
510         else
511                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
512                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
513                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
514 }
515
516 /* Wa_2006604312:icl */
517 static void
518 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
519                        bool enable)
520 {
521         if (enable)
522                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
523                            I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
524         else
525                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
526                            I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
527 }
528
529 static bool
530 needs_modeset(const struct intel_crtc_state *state)
531 {
532         return drm_atomic_crtc_needs_modeset(&state->uapi);
533 }
534
535 bool
536 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
537 {
538         return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
539                 crtc_state->sync_mode_slaves_mask);
540 }
541
542 static bool
543 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
544 {
545         return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
546                 crtc_state->sync_mode_slaves_mask);
547 }
548
549 /*
550  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
551  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
552  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
553  * The helpers' return value is the rate of the clock that is fed to the
554  * display engine's pipe which can be the above fast dot clock rate or a
555  * divided-down version of it.
556  */
557 /* m1 is reserved as 0 in Pineview, n is a ring counter */
558 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
559 {
560         clock->m = clock->m2 + 2;
561         clock->p = clock->p1 * clock->p2;
562         if (WARN_ON(clock->n == 0 || clock->p == 0))
563                 return 0;
564         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
565         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
566
567         return clock->dot;
568 }
569
570 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
571 {
572         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
573 }
574
575 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
576 {
577         clock->m = i9xx_dpll_compute_m(clock);
578         clock->p = clock->p1 * clock->p2;
579         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
580                 return 0;
581         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
582         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
583
584         return clock->dot;
585 }
586
587 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
588 {
589         clock->m = clock->m1 * clock->m2;
590         clock->p = clock->p1 * clock->p2;
591         if (WARN_ON(clock->n == 0 || clock->p == 0))
592                 return 0;
593         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
594         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
595
596         return clock->dot / 5;
597 }
598
599 int chv_calc_dpll_params(int refclk, struct dpll *clock)
600 {
601         clock->m = clock->m1 * clock->m2;
602         clock->p = clock->p1 * clock->p2;
603         if (WARN_ON(clock->n == 0 || clock->p == 0))
604                 return 0;
605         clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
606                                            clock->n << 22);
607         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
608
609         return clock->dot / 5;
610 }
611
612 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
613
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 *
 * Each INTELPllInvalid() below hides a "return false", so the first
 * failing range check rejects the candidate.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        /* m1 > m2 is a hw requirement on platforms with separate m dividers */
        if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        /* VLV/CHV/BXT limit tables don't define combined m/p ranges */
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !IS_GEN9_LP(dev_priv)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}
654
655 static int
656 i9xx_select_p2_div(const struct intel_limit *limit,
657                    const struct intel_crtc_state *crtc_state,
658                    int target)
659 {
660         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
661
662         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
663                 /*
664                  * For LVDS just rely on its current settings for dual-channel.
665                  * We haven't figured out how to reliably set up different
666                  * single/dual channel state, if we even can.
667                  */
668                 if (intel_is_dual_link_lvds(dev_priv))
669                         return limit->p2.p2_fast;
670                 else
671                         return limit->p2.p2_slow;
672         } else {
673                 if (target < limit->p2.dot_limit)
674                         return limit->p2.p2_slow;
675                 else
676                         return limit->p2.p2_fast;
677         }
678 }
679
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->uapi.crtc->dev;
        struct dpll clock;
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /* Exhaustive search over m1/m2/n/p1; p2 is fixed above. */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* hw requires m1 > m2 here (see intel_PLL_is_valid) */
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        /*
                                         * Keep the candidate closest to the
                                         * target; strict '<' keeps the first
                                         * (lowest m1/m2/n/p1) one on ties.
                                         */
                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        /* true iff some candidate got closer than the initial error */
        return (err != target);
}
737
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 *
 * Pineview variant of i9xx_find_best_dpll(): uses pnv_calc_dpll_params()
 * and drops the m1 > m2 constraint (Pineview has a single m divider).
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->uapi.crtc->dev;
        struct dpll clock;
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /* Exhaustive search over m1/m2/n/p1; p2 is fixed above. */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        /* keep the candidate closest to target */
                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        /* true iff some candidate got closer than the initial error */
        return (err != target);
}
793
794 /*
795  * Returns a set of divisors for the desired target clock with the given
796  * refclk, or FALSE.  The returned values represent the clock equation:
797  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
798  *
799  * Target and reference clocks are specified in kHz.
800  *
801  * If match_clock is provided, then best_clock P divider must match the P
802  * divider from @match_clock used for LVDS downclocking.
803  */
804 static bool
805 g4x_find_best_dpll(const struct intel_limit *limit,
806                    struct intel_crtc_state *crtc_state,
807                    int target, int refclk, struct dpll *match_clock,
808                    struct dpll *best_clock)
809 {
810         struct drm_device *dev = crtc_state->uapi.crtc->dev;
811         struct dpll clock;
812         int max_n;
813         bool found = false;
814         /* approximately equals target * 0.00585 */
815         int err_most = (target >> 8) + (target >> 9);
816
817         memset(best_clock, 0, sizeof(*best_clock));
818
819         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
820
821         max_n = limit->n.max;
822         /* based on hardware requirement, prefer smaller n to precision */
823         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
824                 /* based on hardware requirement, prefere larger m1,m2 */
825                 for (clock.m1 = limit->m1.max;
826                      clock.m1 >= limit->m1.min; clock.m1--) {
827                         for (clock.m2 = limit->m2.max;
828                              clock.m2 >= limit->m2.min; clock.m2--) {
829                                 for (clock.p1 = limit->p1.max;
830                                      clock.p1 >= limit->p1.min; clock.p1--) {
831                                         int this_err;
832
833                                         i9xx_calc_dpll_params(refclk, &clock);
834                                         if (!intel_PLL_is_valid(to_i915(dev),
835                                                                 limit,
836                                                                 &clock))
837                                                 continue;
838
839                                         this_err = abs(clock.dot - target);
840                                         if (this_err < err_most) {
841                                                 *best_clock = clock;
842                                                 err_most = this_err;
843                                                 max_n = clock.n;
844                                                 found = true;
845                                         }
846                                 }
847                         }
848                 }
849         }
850         return found;
851 }
852
853 /*
854  * Check if the calculated PLL configuration is more optimal compared to the
855  * best configuration and error found so far. Return the calculated error.
856  */
857 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
858                                const struct dpll *calculated_clock,
859                                const struct dpll *best_clock,
860                                unsigned int best_error_ppm,
861                                unsigned int *error_ppm)
862 {
863         /*
864          * For CHV ignore the error and consider only the P value.
865          * Prefer a bigger P value based on HW requirements.
866          */
867         if (IS_CHERRYVIEW(to_i915(dev))) {
868                 *error_ppm = 0;
869
870                 return calculated_clock->p > best_clock->p;
871         }
872
873         if (WARN_ON_ONCE(!target_freq))
874                 return false;
875
876         *error_ppm = div_u64(1000000ULL *
877                                 abs(target_freq - calculated_clock->dot),
878                              target_freq);
879         /*
880          * Prefer a better P value over a better (smaller) error if the error
881          * is small. Ensure this preference for future configurations too by
882          * setting the error to 0.
883          */
884         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
885                 *error_ppm = 0;
886
887                 return true;
888         }
889
890         return *error_ppm + 10 < best_error_ppm;
891 }
892
893 /*
894  * Returns a set of divisors for the desired target clock with the given
895  * refclk, or FALSE.  The returned values represent the clock equation:
896  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
897  */
898 static bool
899 vlv_find_best_dpll(const struct intel_limit *limit,
900                    struct intel_crtc_state *crtc_state,
901                    int target, int refclk, struct dpll *match_clock,
902                    struct dpll *best_clock)
903 {
904         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
905         struct drm_device *dev = crtc->base.dev;
906         struct dpll clock;
907         unsigned int bestppm = 1000000;
908         /* min update 19.2 MHz */
909         int max_n = min(limit->n.max, refclk / 19200);
910         bool found = false;
911
912         target *= 5; /* fast clock */
913
914         memset(best_clock, 0, sizeof(*best_clock));
915
916         /* based on hardware requirement, prefer smaller n to precision */
917         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
918                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
919                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
920                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
921                                 clock.p = clock.p1 * clock.p2;
922                                 /* based on hardware requirement, prefer bigger m1,m2 values */
923                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
924                                         unsigned int ppm;
925
926                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
927                                                                      refclk * clock.m1);
928
929                                         vlv_calc_dpll_params(refclk, &clock);
930
931                                         if (!intel_PLL_is_valid(to_i915(dev),
932                                                                 limit,
933                                                                 &clock))
934                                                 continue;
935
936                                         if (!vlv_PLL_is_optimal(dev, target,
937                                                                 &clock,
938                                                                 best_clock,
939                                                                 bestppm, &ppm))
940                                                 continue;
941
942                                         *best_clock = clock;
943                                         bestppm = ppm;
944                                         found = true;
945                                 }
946                         }
947                 }
948         }
949
950         return found;
951 }
952
953 /*
954  * Returns a set of divisors for the desired target clock with the given
955  * refclk, or FALSE.  The returned values represent the clock equation:
956  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
957  */
958 static bool
959 chv_find_best_dpll(const struct intel_limit *limit,
960                    struct intel_crtc_state *crtc_state,
961                    int target, int refclk, struct dpll *match_clock,
962                    struct dpll *best_clock)
963 {
964         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
965         struct drm_device *dev = crtc->base.dev;
966         unsigned int best_error_ppm;
967         struct dpll clock;
968         u64 m2;
969         int found = false;
970
971         memset(best_clock, 0, sizeof(*best_clock));
972         best_error_ppm = 1000000;
973
974         /*
975          * Based on hardware doc, the n always set to 1, and m1 always
976          * set to 2.  If requires to support 200Mhz refclk, we need to
977          * revisit this because n may not 1 anymore.
978          */
979         clock.n = 1, clock.m1 = 2;
980         target *= 5;    /* fast clock */
981
982         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
983                 for (clock.p2 = limit->p2.p2_fast;
984                                 clock.p2 >= limit->p2.p2_slow;
985                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
986                         unsigned int error_ppm;
987
988                         clock.p = clock.p1 * clock.p2;
989
990                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
991                                                    refclk * clock.m1);
992
993                         if (m2 > INT_MAX/clock.m1)
994                                 continue;
995
996                         clock.m2 = m2;
997
998                         chv_calc_dpll_params(refclk, &clock);
999
1000                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1001                                 continue;
1002
1003                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1004                                                 best_error_ppm, &error_ppm))
1005                                 continue;
1006
1007                         *best_clock = clock;
1008                         best_error_ppm = error_ppm;
1009                         found = true;
1010                 }
1011         }
1012
1013         return found;
1014 }
1015
1016 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1017                         struct dpll *best_clock)
1018 {
1019         int refclk = 100000;
1020         const struct intel_limit *limit = &intel_limits_bxt;
1021
1022         return chv_find_best_dpll(limit, crtc_state,
1023                                   crtc_state->port_clock, refclk,
1024                                   NULL, best_clock);
1025 }
1026
1027 bool intel_crtc_active(struct intel_crtc *crtc)
1028 {
1029         /* Be paranoid as we can arrive here with only partial
1030          * state retrieved from the hardware during setup.
1031          *
1032          * We can ditch the adjusted_mode.crtc_clock check as soon
1033          * as Haswell has gained clock readout/fastboot support.
1034          *
1035          * We can ditch the crtc->primary->state->fb check as soon as we can
1036          * properly reconstruct framebuffers.
1037          *
1038          * FIXME: The intel_crtc->active here should be switched to
1039          * crtc->state->active once we have proper CRTC states wired up
1040          * for atomic.
1041          */
1042         return crtc->active && crtc->base.primary->state->fb &&
1043                 crtc->config->hw.adjusted_mode.crtc_clock;
1044 }
1045
1046 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1047                                              enum pipe pipe)
1048 {
1049         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1050
1051         return crtc->config->cpu_transcoder;
1052 }
1053
1054 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1055                                     enum pipe pipe)
1056 {
1057         i915_reg_t reg = PIPEDSL(pipe);
1058         u32 line1, line2;
1059         u32 line_mask;
1060
1061         if (IS_GEN(dev_priv, 2))
1062                 line_mask = DSL_LINEMASK_GEN2;
1063         else
1064                 line_mask = DSL_LINEMASK_GEN3;
1065
1066         line1 = I915_READ(reg) & line_mask;
1067         msleep(5);
1068         line2 = I915_READ(reg) & line_mask;
1069
1070         return line1 != line2;
1071 }
1072
1073 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1074 {
1075         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1076         enum pipe pipe = crtc->pipe;
1077
1078         /* Wait for the display line to settle/start moving */
1079         if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1080                 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1081                           pipe_name(pipe), onoff(state));
1082 }
1083
1084 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1085 {
1086         wait_for_pipe_scanline_moving(crtc, false);
1087 }
1088
1089 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1090 {
1091         wait_for_pipe_scanline_moving(crtc, true);
1092 }
1093
1094 static void
1095 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
1096 {
1097         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1098         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1099
1100         if (INTEL_GEN(dev_priv) >= 4) {
1101                 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1102                 i915_reg_t reg = PIPECONF(cpu_transcoder);
1103
1104                 /* Wait for the Pipe State to go off */
1105                 if (intel_de_wait_for_clear(dev_priv, reg,
1106                                             I965_PIPECONF_ACTIVE, 100))
1107                         WARN(1, "pipe_off wait timed out\n");
1108         } else {
1109                 intel_wait_for_pipe_scanline_stopped(crtc);
1110         }
1111 }
1112
1113 /* Only for pre-ILK configs */
1114 void assert_pll(struct drm_i915_private *dev_priv,
1115                 enum pipe pipe, bool state)
1116 {
1117         u32 val;
1118         bool cur_state;
1119
1120         val = I915_READ(DPLL(pipe));
1121         cur_state = !!(val & DPLL_VCO_ENABLE);
1122         I915_STATE_WARN(cur_state != state,
1123              "PLL state assertion failure (expected %s, current %s)\n",
1124                         onoff(state), onoff(cur_state));
1125 }
1126
1127 /* XXX: the dsi pll is shared between MIPI DSI ports */
1128 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1129 {
1130         u32 val;
1131         bool cur_state;
1132
1133         vlv_cck_get(dev_priv);
1134         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1135         vlv_cck_put(dev_priv);
1136
1137         cur_state = val & DSI_PLL_VCO_EN;
1138         I915_STATE_WARN(cur_state != state,
1139              "DSI PLL state assertion failure (expected %s, current %s)\n",
1140                         onoff(state), onoff(cur_state));
1141 }
1142
1143 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1144                           enum pipe pipe, bool state)
1145 {
1146         bool cur_state;
1147         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1148                                                                       pipe);
1149
1150         if (HAS_DDI(dev_priv)) {
1151                 /* DDI does not have a specific FDI_TX register */
1152                 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1153                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1154         } else {
1155                 u32 val = I915_READ(FDI_TX_CTL(pipe));
1156                 cur_state = !!(val & FDI_TX_ENABLE);
1157         }
1158         I915_STATE_WARN(cur_state != state,
1159              "FDI TX state assertion failure (expected %s, current %s)\n",
1160                         onoff(state), onoff(cur_state));
1161 }
1162 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1163 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1164
1165 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1166                           enum pipe pipe, bool state)
1167 {
1168         u32 val;
1169         bool cur_state;
1170
1171         val = I915_READ(FDI_RX_CTL(pipe));
1172         cur_state = !!(val & FDI_RX_ENABLE);
1173         I915_STATE_WARN(cur_state != state,
1174              "FDI RX state assertion failure (expected %s, current %s)\n",
1175                         onoff(state), onoff(cur_state));
1176 }
1177 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1178 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1179
1180 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1181                                       enum pipe pipe)
1182 {
1183         u32 val;
1184
1185         /* ILK FDI PLL is always enabled */
1186         if (IS_GEN(dev_priv, 5))
1187                 return;
1188
1189         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1190         if (HAS_DDI(dev_priv))
1191                 return;
1192
1193         val = I915_READ(FDI_TX_CTL(pipe));
1194         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1195 }
1196
1197 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1198                        enum pipe pipe, bool state)
1199 {
1200         u32 val;
1201         bool cur_state;
1202
1203         val = I915_READ(FDI_RX_CTL(pipe));
1204         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1205         I915_STATE_WARN(cur_state != state,
1206              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1207                         onoff(state), onoff(cur_state));
1208 }
1209
1210 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1211 {
1212         i915_reg_t pp_reg;
1213         u32 val;
1214         enum pipe panel_pipe = INVALID_PIPE;
1215         bool locked = true;
1216
1217         if (WARN_ON(HAS_DDI(dev_priv)))
1218                 return;
1219
1220         if (HAS_PCH_SPLIT(dev_priv)) {
1221                 u32 port_sel;
1222
1223                 pp_reg = PP_CONTROL(0);
1224                 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1225
1226                 switch (port_sel) {
1227                 case PANEL_PORT_SELECT_LVDS:
1228                         intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1229                         break;
1230                 case PANEL_PORT_SELECT_DPA:
1231                         intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1232                         break;
1233                 case PANEL_PORT_SELECT_DPC:
1234                         intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1235                         break;
1236                 case PANEL_PORT_SELECT_DPD:
1237                         intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1238                         break;
1239                 default:
1240                         MISSING_CASE(port_sel);
1241                         break;
1242                 }
1243         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1244                 /* presumably write lock depends on pipe, not port select */
1245                 pp_reg = PP_CONTROL(pipe);
1246                 panel_pipe = pipe;
1247         } else {
1248                 u32 port_sel;
1249
1250                 pp_reg = PP_CONTROL(0);
1251                 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1252
1253                 WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
1254                 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1255         }
1256
1257         val = I915_READ(pp_reg);
1258         if (!(val & PANEL_POWER_ON) ||
1259             ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1260                 locked = false;
1261
1262         I915_STATE_WARN(panel_pipe == pipe && locked,
1263              "panel assertion failure, pipe %c regs locked\n",
1264              pipe_name(pipe));
1265 }
1266
1267 void assert_pipe(struct drm_i915_private *dev_priv,
1268                  enum pipe pipe, bool state)
1269 {
1270         bool cur_state;
1271         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1272                                                                       pipe);
1273         enum intel_display_power_domain power_domain;
1274         intel_wakeref_t wakeref;
1275
1276         /* we keep both pipes enabled on 830 */
1277         if (IS_I830(dev_priv))
1278                 state = true;
1279
1280         power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1281         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
1282         if (wakeref) {
1283                 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1284                 cur_state = !!(val & PIPECONF_ENABLE);
1285
1286                 intel_display_power_put(dev_priv, power_domain, wakeref);
1287         } else {
1288                 cur_state = false;
1289         }
1290
1291         I915_STATE_WARN(cur_state != state,
1292              "pipe %c assertion failure (expected %s, current %s)\n",
1293                         pipe_name(pipe), onoff(state), onoff(cur_state));
1294 }
1295
1296 static void assert_plane(struct intel_plane *plane, bool state)
1297 {
1298         enum pipe pipe;
1299         bool cur_state;
1300
1301         cur_state = plane->get_hw_state(plane, &pipe);
1302
1303         I915_STATE_WARN(cur_state != state,
1304                         "%s assertion failure (expected %s, current %s)\n",
1305                         plane->base.name, onoff(state), onoff(cur_state));
1306 }
1307
1308 #define assert_plane_enabled(p) assert_plane(p, true)
1309 #define assert_plane_disabled(p) assert_plane(p, false)
1310
1311 static void assert_planes_disabled(struct intel_crtc *crtc)
1312 {
1313         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1314         struct intel_plane *plane;
1315
1316         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1317                 assert_plane_disabled(plane);
1318 }
1319
/*
 * Assert that vblank interrupts are off on @crtc. If
 * drm_crtc_vblank_get() succeeds (returns 0) the warning fires and the
 * just-taken reference is dropped again to keep the count balanced.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1325
1326 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1327                                     enum pipe pipe)
1328 {
1329         u32 val;
1330         bool enabled;
1331
1332         val = I915_READ(PCH_TRANSCONF(pipe));
1333         enabled = !!(val & TRANS_ENABLE);
1334         I915_STATE_WARN(enabled,
1335              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1336              pipe_name(pipe));
1337 }
1338
1339 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1340                                    enum pipe pipe, enum port port,
1341                                    i915_reg_t dp_reg)
1342 {
1343         enum pipe port_pipe;
1344         bool state;
1345
1346         state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1347
1348         I915_STATE_WARN(state && port_pipe == pipe,
1349                         "PCH DP %c enabled on transcoder %c, should be disabled\n",
1350                         port_name(port), pipe_name(pipe));
1351
1352         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1353                         "IBX PCH DP %c still using transcoder B\n",
1354                         port_name(port));
1355 }
1356
1357 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1358                                      enum pipe pipe, enum port port,
1359                                      i915_reg_t hdmi_reg)
1360 {
1361         enum pipe port_pipe;
1362         bool state;
1363
1364         state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1365
1366         I915_STATE_WARN(state && port_pipe == pipe,
1367                         "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1368                         port_name(port), pipe_name(pipe));
1369
1370         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1371                         "IBX PCH HDMI %c still using transcoder B\n",
1372                         port_name(port));
1373 }
1374
1375 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1376                                       enum pipe pipe)
1377 {
1378         enum pipe port_pipe;
1379
1380         assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1381         assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1382         assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
1383
1384         I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1385                         port_pipe == pipe,
1386                         "PCH VGA enabled on transcoder %c, should be disabled\n",
1387                         pipe_name(pipe));
1388
1389         I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1390                         port_pipe == pipe,
1391                         "PCH LVDS enabled on transcoder %c, should be disabled\n",
1392                         pipe_name(pipe));
1393
1394         /* PCH SDVOB multiplex with HDMIB */
1395         assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1396         assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1397         assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
1398 }
1399
1400 static void _vlv_enable_pll(struct intel_crtc *crtc,
1401                             const struct intel_crtc_state *pipe_config)
1402 {
1403         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1404         enum pipe pipe = crtc->pipe;
1405
1406         I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1407         POSTING_READ(DPLL(pipe));
1408         udelay(150);
1409
1410         if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1411                 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1412 }
1413
1414 static void vlv_enable_pll(struct intel_crtc *crtc,
1415                            const struct intel_crtc_state *pipe_config)
1416 {
1417         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1418         enum pipe pipe = crtc->pipe;
1419
1420         assert_pipe_disabled(dev_priv, pipe);
1421
1422         /* PLL is protected by panel, make sure we can write it */
1423         assert_panel_unlocked(dev_priv, pipe);
1424
1425         if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1426                 _vlv_enable_pll(crtc, pipe_config);
1427
1428         I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1429         POSTING_READ(DPLL_MD(pipe));
1430 }
1431
1432
1433 static void _chv_enable_pll(struct intel_crtc *crtc,
1434                             const struct intel_crtc_state *pipe_config)
1435 {
1436         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1437         enum pipe pipe = crtc->pipe;
1438         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1439         u32 tmp;
1440
1441         vlv_dpio_get(dev_priv);
1442
1443         /* Enable back the 10bit clock to display controller */
1444         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1445         tmp |= DPIO_DCLKP_EN;
1446         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1447
1448         vlv_dpio_put(dev_priv);
1449
1450         /*
1451          * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1452          */
1453         udelay(1);
1454
1455         /* Enable PLL */
1456         I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1457
1458         /* Check PLL is locked */
1459         if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
1460                 DRM_ERROR("PLL %d failed to lock\n", pipe);
1461 }
1462
1463 static void chv_enable_pll(struct intel_crtc *crtc,
1464                            const struct intel_crtc_state *pipe_config)
1465 {
1466         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1467         enum pipe pipe = crtc->pipe;
1468
1469         assert_pipe_disabled(dev_priv, pipe);
1470
1471         /* PLL is protected by panel, make sure we can write it */
1472         assert_panel_unlocked(dev_priv, pipe);
1473
1474         if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1475                 _chv_enable_pll(crtc, pipe_config);
1476
1477         if (pipe != PIPE_A) {
1478                 /*
1479                  * WaPixelRepeatModeFixForC0:chv
1480                  *
1481                  * DPLLCMD is AWOL. Use chicken bits to propagate
1482                  * the value from DPLLBMD to either pipe B or C.
1483                  */
1484                 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
1485                 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1486                 I915_WRITE(CBR4_VLV, 0);
1487                 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1488
1489                 /*
1490                  * DPLLB VGA mode also seems to cause problems.
1491                  * We should always have it disabled.
1492                  */
1493                 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1494         } else {
1495                 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1496                 POSTING_READ(DPLL_MD(pipe));
1497         }
1498 }
1499
1500 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1501 {
1502         if (IS_I830(dev_priv))
1503                 return false;
1504
1505         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1506 }
1507
/*
 * Enable the DPLL for a gmch-style (pre-ILK) pipe and wait for its
 * clocks to stabilize. The pipe itself must still be disabled.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* Gen4+ carries the pixel multiplier in a separate MD register. */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1553
/*
 * Disable the DPLL for a gmch-style pipe, leaving only the VGA-mode
 * disable bit set. No-op on i830, which keeps its PLLs running.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1570
/*
 * Disable the DPLL on VLV: program a value that keeps only the
 * reference clock and VGA-mode-disable bits set, plus the integrated
 * CRI clock for pipes other than A.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1586
/*
 * Disable the DPLL on CHV: keep the reference (and, for pipes other
 * than A, the integrated CRI) clocks running, then turn off the 10 bit
 * clock in the DPIO PHY common lane block.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1612
/*
 * Wait (up to 1 ms) for a VLV/CHV digital port to report ready in its
 * status register, and WARN (non-fatally) on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	/* Each port's ready bits live in a different register/field. */
	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits sit 4 bits above port B's in DPLL(0). */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1644
/*
 * Enable the PCH transcoder for this crtc's pipe, copying the BPC and
 * interlace configuration over from the CPU pipe's PIPECONF. The
 * shared DPLL and both FDI directions must already be running.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		/* IBX + SDVO requires the legacy interlaced mode. */
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1701
/*
 * Enable the single LPT PCH transcoder (always fed via pipe A's FDI),
 * matching the interlace mode of the given CPU transcoder.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1730
/*
 * Disable the PCH transcoder for @pipe and wait for it to report off.
 * FDI and the PCH ports must already be disabled.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1760
/*
 * Disable the single LPT PCH transcoder, wait for it to report off,
 * and clear the timing override workaround bit set on enable.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1778
1779 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1780 {
1781         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1782
1783         if (HAS_PCH_LPT(dev_priv))
1784                 return PIPE_A;
1785         else
1786                 return crtc->pipe;
1787 }
1788
1789 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1790 {
1791         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1792
1793         /*
1794          * On i965gm the hardware frame counter reads
1795          * zero when the TV encoder is enabled :(
1796          */
1797         if (IS_I965GM(dev_priv) &&
1798             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1799                 return 0;
1800
1801         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1802                 return 0xffffffff; /* full 32 bit counter */
1803         else if (INTEL_GEN(dev_priv) >= 3)
1804                 return 0xffffff; /* only 24 bits of frame count */
1805         else
1806                 return 0; /* Gen2 doesn't have a hardware frame counter */
1807 }
1808
1809 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1810 {
1811         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1812
1813         drm_crtc_set_max_vblank_count(&crtc->base,
1814                                       intel_crtc_max_vblank_count(crtc_state));
1815         drm_crtc_vblank_on(&crtc->base);
1816 }
1817
/*
 * Enable the CPU pipe/transcoder. Planes must be off and the feeding
 * clocks (pipe PLL / DSI PLL on GMCH, FDI PLLs for PCH encoders) must
 * already be running. On platforms without a usable HW frame counter
 * we also wait for the scanline counter to start moving.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1875
/*
 * Disable the CPU pipe/transcoder. Planes must already be off. On i830
 * the pipe stays enabled (both pipes are kept running there), but
 * double wide mode is still cleared if it was in use.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for the pipe to actually stop if we cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1915
/* GTT page/tile size in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1920
/*
 * Tile row width in bytes for the fb's modifier and color plane.
 * For linear buffers the whole page is treated as a single tile row.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		/* The CCS aux plane uses 128 byte wide tiles. */
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
1968
1969 static unsigned int
1970 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1971 {
1972         return intel_tile_size(to_i915(fb->dev)) /
1973                 intel_tile_width_bytes(fb, color_plane);
1974 }
1975
1976 /* Return the tile dimensions in pixel units */
1977 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1978                             unsigned int *tile_width,
1979                             unsigned int *tile_height)
1980 {
1981         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1982         unsigned int cpp = fb->format->cpp[color_plane];
1983
1984         *tile_width = tile_width_bytes / cpp;
1985         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1986 }
1987
/* Round @height up to a whole number of tile rows for this fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1996
1997 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1998 {
1999         unsigned int size = 0;
2000         int i;
2001
2002         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2003                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2004
2005         return size;
2006 }
2007
2008 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2009 {
2010         unsigned int size = 0;
2011         int i;
2012
2013         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2014                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2015
2016         return size;
2017 }
2018
2019 static void
2020 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2021                         const struct drm_framebuffer *fb,
2022                         unsigned int rotation)
2023 {
2024         view->type = I915_GGTT_VIEW_NORMAL;
2025         if (drm_rotation_90_or_270(rotation)) {
2026                 view->type = I915_GGTT_VIEW_ROTATED;
2027                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2028         }
2029 }
2030
/* Required GGTT alignment for cursor surfaces, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2042
/* Required GGTT alignment for linear scanout surfaces, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2055
/*
 * Minimum GGTT alignment (in bytes) needed to scan out the given color
 * plane of @fb with its modifier.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (color_plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2082
2083 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2084 {
2085         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2086         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2087
2088         return INTEL_GEN(dev_priv) < 4 ||
2089                 (plane->has_fbc &&
2090                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2091 }
2092
/*
 * Pin a framebuffer's backing object into the GGTT for scanout using
 * the given view, optionally installing a fence register. Returns the
 * pinned vma (with an extra reference the caller must drop via
 * intel_unpin_fb_vma()) or an ERR_PTR. PLANE_HAS_FENCE is set in
 * @out_flags when a fence was attached.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	i915_gem_object_lock(obj);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Fence is mandatory pre-gen4; unwind the pin on failure. */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference returned to the caller. */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	i915_gem_object_unlock(obj);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2187
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if one was
 * installed, per @flags), unpin from the display plane, and drop the
 * extra vma reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2198
2199 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2200                           unsigned int rotation)
2201 {
2202         if (drm_rotation_90_or_270(rotation))
2203                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2204         else
2205                 return fb->pitches[color_plane];
2206 }
2207
2208 /*
2209  * Convert the x/y offsets into a linear offset.
2210  * Only valid with 0/180 degree rotation, which is fine since linear
2211  * offset is only used with linear buffers on pre-hsw and tiled buffers
2212  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2213  */
2214 u32 intel_fb_xy_to_linear(int x, int y,
2215                           const struct intel_plane_state *state,
2216                           int color_plane)
2217 {
2218         const struct drm_framebuffer *fb = state->hw.fb;
2219         unsigned int cpp = fb->format->cpp[color_plane];
2220         unsigned int pitch = state->color_plane[color_plane].stride;
2221
2222         return y * pitch + x * cpp;
2223 }
2224
2225 /*
2226  * Add the x/y offsets derived from fb->offsets[] to the user
2227  * specified plane src x/y offsets. The resulting x/y offsets
2228  * specify the start of scanout from the beginning of the gtt mapping.
2229  */
2230 void intel_add_fb_offsets(int *x, int *y,
2231                           const struct intel_plane_state *state,
2232                           int color_plane)
2233
2234 {
2235         *x += state->color_plane[color_plane].x;
2236         *y += state->color_plane[color_plane].y;
2237 }
2238
/*
 * Re-express the byte distance between two tile-aligned offsets as x/y
 * pixel offsets, given the tile geometry and pitch (in tiles). Both
 * offsets must be tile-size aligned and new_offset <= old_offset.
 * Returns new_offset for the caller's convenience.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* Spread the tile delta over whole rows, then the remainder over x. */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2265
/*
 * Is the surface stored without tiling? color_plane is currently
 * unused; only the modifier decides.
 */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2270
/*
 * Adjust x/y so that (new_offset, x, y) addresses the same pixel as
 * (old_offset, x, y) did, handling both tiled and linear layouts.
 * Returns new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* In the rotated view the pitch is in tile_height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold the byte delta directly back into x/y. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2309
2310 /*
2311  * Adjust the tile offset by moving the difference into
2312  * the x/y offsets.
2313  */
2314 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2315                                              const struct intel_plane_state *state,
2316                                              int color_plane,
2317                                              u32 old_offset, u32 new_offset)
2318 {
2319         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2320                                            state->hw.rotation,
2321                                            state->color_plane[color_plane].stride,
2322                                            old_offset, new_offset);
2323 }
2324
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/* Turn the power-of-two alignment into a bitmask (0 stays 0). */
	if (alignment)
		alignment--;

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* pitch is the tile-height-aligned fb height here */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Reduce x/y to intra-tile coordinates ... */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		/* ... and fold the whole tiles into the byte offset. */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Push the below-alignment remainder back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		/* Linear: plain row-major byte offset. */
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* Remainder below the alignment becomes the new x/y. */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2389
2390 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2391                                               const struct intel_plane_state *state,
2392                                               int color_plane)
2393 {
2394         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2395         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2396         const struct drm_framebuffer *fb = state->hw.fb;
2397         unsigned int rotation = state->hw.rotation;
2398         int pitch = state->color_plane[color_plane].stride;
2399         u32 alignment;
2400
2401         if (intel_plane->id == PLANE_CURSOR)
2402                 alignment = intel_cursor_alignment(dev_priv);
2403         else
2404                 alignment = intel_surf_alignment(fb, color_plane);
2405
2406         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2407                                             pitch, rotation, alignment);
2408 }
2409
2410 /* Convert the fb->offset[] into x/y offsets */
2411 static int intel_fb_offset_to_xy(int *x, int *y,
2412                                  const struct drm_framebuffer *fb,
2413                                  int color_plane)
2414 {
2415         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2416         unsigned int height;
2417
2418         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2419             fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2420                 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2421                               fb->offsets[color_plane], color_plane);
2422                 return -EINVAL;
2423         }
2424
2425         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2426         height = ALIGN(height, intel_tile_height(fb, color_plane));
2427
2428         /* Catch potential overflows early */
2429         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2430                             fb->offsets[color_plane])) {
2431                 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2432                               fb->offsets[color_plane], fb->pitches[color_plane],
2433                               color_plane);
2434                 return -ERANGE;
2435         }
2436
2437         *x = 0;
2438         *y = 0;
2439
2440         intel_adjust_aligned_offset(x, y,
2441                                     fb, color_plane, DRM_MODE_ROTATE_0,
2442                                     fb->pitches[color_plane],
2443                                     fb->offsets[color_plane], 0);
2444
2445         return 0;
2446 }
2447
2448 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2449 {
2450         switch (fb_modifier) {
2451         case I915_FORMAT_MOD_X_TILED:
2452                 return I915_TILING_X;
2453         case I915_FORMAT_MOD_Y_TILED:
2454         case I915_FORMAT_MOD_Y_TILED_CCS:
2455                 return I915_TILING_Y;
2456         default:
2457                 return I915_TILING_NONE;
2458         }
2459 }
2460
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
/* hsub=8/vsub=16 encode the 8x16 main-pixels-per-CCS-byte ratio above */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2485
2486 static const struct drm_format_info *
2487 lookup_format_info(const struct drm_format_info formats[],
2488                    int num_formats, u32 format)
2489 {
2490         int i;
2491
2492         for (i = 0; i < num_formats; i++) {
2493                 if (formats[i].format == format)
2494                         return &formats[i];
2495         }
2496
2497         return NULL;
2498 }
2499
2500 static const struct drm_format_info *
2501 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2502 {
2503         switch (cmd->modifier[0]) {
2504         case I915_FORMAT_MOD_Y_TILED_CCS:
2505         case I915_FORMAT_MOD_Yf_TILED_CCS:
2506                 return lookup_format_info(ccs_formats,
2507                                           ARRAY_SIZE(ccs_formats),
2508                                           cmd->pixel_format);
2509         default:
2510                 return NULL;
2511         }
2512 }
2513
2514 bool is_ccs_modifier(u64 modifier)
2515 {
2516         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2517                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2518 }
2519
2520 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2521                               u32 pixel_format, u64 modifier)
2522 {
2523         struct intel_crtc *crtc;
2524         struct intel_plane *plane;
2525
2526         /*
2527          * We assume the primary plane for pipe A has
2528          * the highest stride limits of them all.
2529          */
2530         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2531         plane = to_intel_plane(crtc->base.primary);
2532
2533         return plane->max_stride(plane, pixel_format, modifier,
2534                                  DRM_MODE_ROTATE_0);
2535 }
2536
2537 static
2538 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2539                         u32 pixel_format, u64 modifier)
2540 {
2541         /*
2542          * Arbitrary limit for gen4+ chosen to match the
2543          * render engine max stride.
2544          *
2545          * The new CCS hash mode makes remapping impossible
2546          */
2547         if (!is_ccs_modifier(modifier)) {
2548                 if (INTEL_GEN(dev_priv) >= 7)
2549                         return 256*1024;
2550                 else if (INTEL_GEN(dev_priv) >= 4)
2551                         return 128*1024;
2552         }
2553
2554         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2555 }
2556
2557 static u32
2558 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2559 {
2560         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2561
2562         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2563                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2564                                                            fb->format->format,
2565                                                            fb->modifier);
2566
2567                 /*
2568                  * To make remapping with linear generally feasible
2569                  * we need the stride to be page aligned.
2570                  */
2571                 if (fb->pitches[color_plane] > max_stride)
2572                         return intel_tile_size(dev_priv);
2573                 else
2574                         return 64;
2575         } else {
2576                 return intel_tile_width_bytes(fb, color_plane);
2577         }
2578 }
2579
2580 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2581 {
2582         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2583         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2584         const struct drm_framebuffer *fb = plane_state->hw.fb;
2585         int i;
2586
2587         /* We don't want to deal with remapping with cursors */
2588         if (plane->id == PLANE_CURSOR)
2589                 return false;
2590
2591         /*
2592          * The display engine limits already match/exceed the
2593          * render engine limits, so not much point in remapping.
2594          * Would also need to deal with the fence POT alignment
2595          * and gen2 2KiB GTT tile size.
2596          */
2597         if (INTEL_GEN(dev_priv) < 4)
2598                 return false;
2599
2600         /*
2601          * The new CCS hash mode isn't compatible with remapping as
2602          * the virtual address of the pages affects the compressed data.
2603          */
2604         if (is_ccs_modifier(fb->modifier))
2605                 return false;
2606
2607         /* Linear needs a page aligned stride for remapping */
2608         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2609                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2610
2611                 for (i = 0; i < fb->format->num_planes; i++) {
2612                         if (fb->pitches[i] & alignment)
2613                                 return false;
2614                 }
2615         }
2616
2617         return true;
2618 }
2619
2620 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2621 {
2622         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2623         const struct drm_framebuffer *fb = plane_state->hw.fb;
2624         unsigned int rotation = plane_state->hw.rotation;
2625         u32 stride, max_stride;
2626
2627         /*
2628          * No remapping for invisible planes since we don't have
2629          * an actual source viewport to remap.
2630          */
2631         if (!plane_state->uapi.visible)
2632                 return false;
2633
2634         if (!intel_plane_can_remap(plane_state))
2635                 return false;
2636
2637         /*
2638          * FIXME: aux plane limits on gen9+ are
2639          * unclear in Bspec, for now no checking.
2640          */
2641         stride = intel_fb_pitch(fb, 0, rotation);
2642         max_stride = plane->max_stride(plane, fb->format->format,
2643                                        fb->modifier, rotation);
2644
2645         return stride > max_stride;
2646 }
2647
/*
 * Fill in the derived framebuffer layout: the per-plane x/y offsets of
 * the first pixel for both the normal and the rotated GTT views, the
 * rotation info used to build the rotated VMA, and a sanity check that
 * the fb actually fits in the backing object.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Turn the byte offset into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* Plane 1 of a CCS fb is the compression control surface. */
		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			/* Both scaled into main surface pixel units. */
			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Page-aligned base of this plane, in tile units. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb->modifier, i)) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* Linear surfaces don't get a rotated view. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2809
/*
 * Build a remapped/rotated GGTT view covering only the plane's source
 * viewport, and recompute the per-color-plane stride and x/y offsets
 * relative to that view. Used when the fb stride exceeds the plane hw
 * limit (see intel_plane_needs_remap()).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are 16.16 fixed point */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS can't be remapped; callers must have filtered it out. */
	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* Plane 0 is never subsampled. */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		/* Page-aligned base of the viewport, in tile units. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
2919
2920 static int
2921 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2922 {
2923         const struct intel_framebuffer *fb =
2924                 to_intel_framebuffer(plane_state->hw.fb);
2925         unsigned int rotation = plane_state->hw.rotation;
2926         int i, num_planes;
2927
2928         if (!fb)
2929                 return 0;
2930
2931         num_planes = fb->base.format->num_planes;
2932
2933         if (intel_plane_needs_remap(plane_state)) {
2934                 intel_plane_remap_gtt(plane_state);
2935
2936                 /*
2937                  * Sometimes even remapping can't overcome
2938                  * the stride limitations :( Can happen with
2939                  * big plane sizes and suitably misaligned
2940                  * offsets.
2941                  */
2942                 return intel_plane_check_stride(plane_state);
2943         }
2944
2945         intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2946
2947         for (i = 0; i < num_planes; i++) {
2948                 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2949                 plane_state->color_plane[i].offset = 0;
2950
2951                 if (drm_rotation_90_or_270(rotation)) {
2952                         plane_state->color_plane[i].x = fb->rotated[i].x;
2953                         plane_state->color_plane[i].y = fb->rotated[i].y;
2954                 } else {
2955                         plane_state->color_plane[i].x = fb->normal[i].x;
2956                         plane_state->color_plane[i].y = fb->normal[i].y;
2957                 }
2958         }
2959
2960         /* Rotate src coordinates to match rotated GTT view */
2961         if (drm_rotation_90_or_270(rotation))
2962                 drm_rect_rotate(&plane_state->uapi.src,
2963                                 fb->base.width << 16, fb->base.height << 16,
2964                                 DRM_MODE_ROTATE_270);
2965
2966         return intel_plane_check_stride(plane_state);
2967 }
2968
2969 static int i9xx_format_to_fourcc(int format)
2970 {
2971         switch (format) {
2972         case DISPPLANE_8BPP:
2973                 return DRM_FORMAT_C8;
2974         case DISPPLANE_BGRX555:
2975                 return DRM_FORMAT_XRGB1555;
2976         case DISPPLANE_BGRX565:
2977                 return DRM_FORMAT_RGB565;
2978         default:
2979         case DISPPLANE_BGRX888:
2980                 return DRM_FORMAT_XRGB8888;
2981         case DISPPLANE_RGBX888:
2982                 return DRM_FORMAT_XBGR8888;
2983         case DISPPLANE_BGRX101010:
2984                 return DRM_FORMAT_XRGB2101010;
2985         case DISPPLANE_RGBX101010:
2986                 return DRM_FORMAT_XBGR2101010;
2987         case DISPPLANE_RGBX161616:
2988                 return DRM_FORMAT_XBGR16161616F;
2989         }
2990 }
2991
2992 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2993 {
2994         switch (format) {
2995         case PLANE_CTL_FORMAT_RGB_565:
2996                 return DRM_FORMAT_RGB565;
2997         case PLANE_CTL_FORMAT_NV12:
2998                 return DRM_FORMAT_NV12;
2999         case PLANE_CTL_FORMAT_P010:
3000                 return DRM_FORMAT_P010;
3001         case PLANE_CTL_FORMAT_P012:
3002                 return DRM_FORMAT_P012;
3003         case PLANE_CTL_FORMAT_P016:
3004                 return DRM_FORMAT_P016;
3005         case PLANE_CTL_FORMAT_Y210:
3006                 return DRM_FORMAT_Y210;
3007         case PLANE_CTL_FORMAT_Y212:
3008                 return DRM_FORMAT_Y212;
3009         case PLANE_CTL_FORMAT_Y216:
3010                 return DRM_FORMAT_Y216;
3011         case PLANE_CTL_FORMAT_Y410:
3012                 return DRM_FORMAT_XVYU2101010;
3013         case PLANE_CTL_FORMAT_Y412:
3014                 return DRM_FORMAT_XVYU12_16161616;
3015         case PLANE_CTL_FORMAT_Y416:
3016                 return DRM_FORMAT_XVYU16161616;
3017         default:
3018         case PLANE_CTL_FORMAT_XRGB_8888:
3019                 if (rgb_order) {
3020                         if (alpha)
3021                                 return DRM_FORMAT_ABGR8888;
3022                         else
3023                                 return DRM_FORMAT_XBGR8888;
3024                 } else {
3025                         if (alpha)
3026                                 return DRM_FORMAT_ARGB8888;
3027                         else
3028                                 return DRM_FORMAT_XRGB8888;
3029                 }
3030         case PLANE_CTL_FORMAT_XRGB_2101010:
3031                 if (rgb_order)
3032                         return DRM_FORMAT_XBGR2101010;
3033                 else
3034                         return DRM_FORMAT_XRGB2101010;
3035         case PLANE_CTL_FORMAT_XRGB_16161616F:
3036                 if (rgb_order) {
3037                         if (alpha)
3038                                 return DRM_FORMAT_ABGR16161616F;
3039                         else
3040                                 return DRM_FORMAT_XBGR16161616F;
3041                 } else {
3042                         if (alpha)
3043                                 return DRM_FORMAT_ARGB16161616F;
3044                         else
3045                                 return DRM_FORMAT_XRGB16161616F;
3046                 }
3047         }
3048 }
3049
/*
 * Wrap the BIOS-programmed framebuffer described by @plane_config in a
 * GEM object placed over the preallocated stolen memory range, and
 * initialize the framebuffer in plane_config->fb around it.
 *
 * Returns true if plane_config->fb is ready to be used as the initial
 * plane fb, false if takeover failed (zero/oversized allocation,
 * unsupported modifier or tiling, or stolen-memory object creation
 * failure).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        /* Expand [base, base + size) to whole pages. */
        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
        u32 size_aligned = round_up(plane_config->base + plane_config->size,
                                    PAGE_SIZE);
        struct drm_i915_gem_object *obj;
        bool ret = false;

        size_aligned -= base_aligned;

        if (plane_config->size == 0)
                return false;

        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
        if (size_aligned * 2 > dev_priv->stolen_usable_size)
                return false;

        /* Only take over modifiers we know how to describe below. */
        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
        case I915_FORMAT_MOD_Y_TILED:
                break;
        default:
                DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
                                 fb->modifier);
                return false;
        }

        obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
                                                             base_aligned,
                                                             base_aligned,
                                                             size_aligned);
        if (IS_ERR(obj))
                return false;

        switch (plane_config->tiling) {
        case I915_TILING_NONE:
                break;
        case I915_TILING_X:
        case I915_TILING_Y:
                /* Tiling mode and stride share a single object field. */
                obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
                break;
        default:
                MISSING_CASE(plane_config->tiling);
                goto out;
        }

        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out;
        }


        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
        ret = true;
out:
        /*
         * Drop our reference on both paths; on success the framebuffer
         * presumably holds its own reference to obj (taken in
         * intel_framebuffer_init()) - not visible here, verify there.
         */
        i915_gem_object_put(obj);
        return ret;
}
3124
3125 static void
3126 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3127                         struct intel_plane_state *plane_state,
3128                         bool visible)
3129 {
3130         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3131
3132         plane_state->uapi.visible = visible;
3133
3134         if (visible)
3135                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3136         else
3137                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3138 }
3139
/* Rebuild crtc_state->active_planes (per-pipe plane id bitmask) from
 * the uapi plane_mask. */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        struct drm_plane *plane;

        /*
         * Active_planes aliases if multiple "primary" or cursor planes
         * have been used on the same (or wrong) pipe. plane_mask uses
         * unique ids, hence we can use that to reconstruct active_planes.
         */
        crtc_state->active_planes = 0;

        drm_for_each_plane_mask(plane, &dev_priv->drm,
                                crtc_state->uapi.plane_mask)
                crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
3156
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * updating the current (committed) crtc/plane state in place to match.
 * Used when sanitizing inherited hw state (e.g. when the BIOS fb could
 * not be reconstructed).
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
                      plane->base.base.id, plane->base.name,
                      crtc->base.base.id, crtc->base.name);

        /* Keep plane_mask/active_planes and bookkeeping consistent. */
        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_active_planes(crtc_state);
        crtc_state->data_rate[plane->id] = 0;
        crtc_state->min_cdclk[plane->id] = 0;

        /* The primary plane has extra pre-disable work to do. */
        if (plane->id == PLANE_PRIMARY)
                intel_pre_disable_primary_noatomic(&crtc->base);

        intel_disable_plane(plane, crtc_state);
}
3179
3180 static struct intel_frontbuffer *
3181 to_intel_frontbuffer(struct drm_framebuffer *fb)
3182 {
3183         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3184 }
3185
/*
 * Attach the framebuffer the BIOS left behind to @intel_crtc's primary
 * plane state: either by wrapping the preallocated memory in a new fb
 * (intel_alloc_initial_plane_obj()), or by sharing the fb of another
 * active CRTC scanning out the same GGTT address. If neither works,
 * the plane is disabled instead.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* Same GGTT address -> both pipes scan out the same fb. */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->hw.fb;
                        drm_framebuffer_get(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);

        return;

valid_fb:
        intel_state->hw.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->hw.rotation);
        intel_state->color_plane[0].stride =
                intel_fb_pitch(fb, 0, intel_state->hw.rotation);

        /* Pin the fb so scanout keeps reading valid memory. */
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb,
                                           &intel_state->view,
                                           intel_plane_uses_fence(intel_state),
                                           &intel_state->flags);
        if (IS_ERR(intel_state->vma)) {
                DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
                          intel_crtc->pipe, PTR_ERR(intel_state->vma));

                intel_state->vma = NULL;
                drm_framebuffer_put(fb);
                return;
        }

        intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

        /* Full-fb src/dst rectangles; src is in 16.16 fixed point. */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->uapi.src = drm_plane_state_src(plane_state);
        intel_state->uapi.dst = drm_plane_state_dest(plane_state);

        if (plane_config->tiling)
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        plane_state->crtc = &intel_crtc->base;
        intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &to_intel_frontbuffer(fb)->bits);
}
3291
3292 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3293                                int color_plane,
3294                                unsigned int rotation)
3295 {
3296         int cpp = fb->format->cpp[color_plane];
3297
3298         switch (fb->modifier) {
3299         case DRM_FORMAT_MOD_LINEAR:
3300         case I915_FORMAT_MOD_X_TILED:
3301                 /*
3302                  * Validated limit is 4k, but has 5k should
3303                  * work apart from the following features:
3304                  * - Ytile (already limited to 4k)
3305                  * - FP16 (already limited to 4k)
3306                  * - render compression (already limited to 4k)
3307                  * - KVMR sprite and cursor (don't care)
3308                  * - horizontal panning (TODO verify this)
3309                  * - pipe and plane scaling (TODO verify this)
3310                  */
3311                 if (cpp == 8)
3312                         return 4096;
3313                 else
3314                         return 5120;
3315         case I915_FORMAT_MOD_Y_TILED_CCS:
3316         case I915_FORMAT_MOD_Yf_TILED_CCS:
3317                 /* FIXME AUX plane? */
3318         case I915_FORMAT_MOD_Y_TILED:
3319         case I915_FORMAT_MOD_Yf_TILED:
3320                 if (cpp == 8)
3321                         return 2048;
3322                 else
3323                         return 4096;
3324         default:
3325                 MISSING_CASE(fb->modifier);
3326                 return 2048;
3327         }
3328 }
3329
3330 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3331                                int color_plane,
3332                                unsigned int rotation)
3333 {
3334         int cpp = fb->format->cpp[color_plane];
3335
3336         switch (fb->modifier) {
3337         case DRM_FORMAT_MOD_LINEAR:
3338         case I915_FORMAT_MOD_X_TILED:
3339                 if (cpp == 8)
3340                         return 4096;
3341                 else
3342                         return 5120;
3343         case I915_FORMAT_MOD_Y_TILED_CCS:
3344         case I915_FORMAT_MOD_Yf_TILED_CCS:
3345                 /* FIXME AUX plane? */
3346         case I915_FORMAT_MOD_Y_TILED:
3347         case I915_FORMAT_MOD_Yf_TILED:
3348                 if (cpp == 8)
3349                         return 2048;
3350                 else
3351                         return 5120;
3352         default:
3353                 MISSING_CASE(fb->modifier);
3354                 return 2048;
3355         }
3356 }
3357
/*
 * Max plane source width on icl+: a single 5k limit regardless of
 * modifier, cpp or rotation (parameters kept for signature parity
 * with the skl/glk variants).
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
                               int color_plane,
                               unsigned int rotation)
{
        return 5120;
}
3364
/* Max plane source height (scanlines) on pre-icl skl-era hardware. */
static int skl_max_plane_height(void)
{
        return 4096;
}
3369
/* Max plane source height (scanlines) on icl+. */
static int icl_max_plane_height(void)
{
        return 4320;
}
3374
/*
 * Walk the CCS AUX surface offset backwards, one alignment step at a
 * time, until its implied x/y coordinates match the main surface's
 * @main_x/@main_y (at @main_offset).
 *
 * Returns true and commits the new color_plane[1] offset/x/y when a
 * match was found, false otherwise.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
                                           int main_x, int main_y, u32 main_offset)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int aux_x = plane_state->color_plane[1].x;
        int aux_y = plane_state->color_plane[1].y;
        u32 aux_offset = plane_state->color_plane[1].offset;
        u32 alignment = intel_surf_alignment(fb, 1);

        /* AUX offset may not drop below the main offset, and aux_y only grows. */
        while (aux_offset >= main_offset && aux_y <= main_y) {
                int x, y;

                if (aux_x == main_x && aux_y == main_y)
                        break;

                if (aux_offset == 0)
                        break;

                /* Step back one alignment unit in subsampled AUX coordinates. */
                x = aux_x / hsub;
                y = aux_y / vsub;
                aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
                                                               aux_offset, aux_offset - alignment);
                /* Convert back to main-surface pixels, keeping the sub-sample remainder. */
                aux_x = x * hsub + aux_x % hsub;
                aux_y = y * vsub + aux_y % vsub;
        }

        if (aux_x != main_x || aux_y != main_y)
                return false;

        plane_state->color_plane[1].offset = aux_offset;
        plane_state->color_plane[1].x = aux_x;
        plane_state->color_plane[1].y = aux_y;

        return true;
}
3412
/*
 * Validate the main (Y/RGB) surface size against platform limits and
 * compute its final aligned offset and x/y, honoring the constraints
 * imposed by X-tiling and by an already-computed CCS AUX surface.
 *
 * Returns 0 on success, -EINVAL if no acceptable offset exists.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        /* src rectangle is in 16.16 fixed point. */
        int x = plane_state->uapi.src.x1 >> 16;
        int y = plane_state->uapi.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->uapi.src) >> 16;
        int h = drm_rect_height(&plane_state->uapi.src) >> 16;
        int max_width;
        int max_height;
        u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

        if (INTEL_GEN(dev_priv) >= 11)
                max_width = icl_max_plane_width(fb, 0, rotation);
        else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                max_width = glk_max_plane_width(fb, 0, rotation);
        else
                max_width = skl_max_plane_width(fb, 0, rotation);

        if (INTEL_GEN(dev_priv) >= 11)
                max_height = icl_max_plane_height();
        else
                max_height = skl_max_plane_height();

        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
        alignment = intel_surf_alignment(fb, 0);

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                           offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
                int cpp = fb->format->cpp[0];

                /* Lower the offset until the row fits within the stride. */
                while ((x + w) * cpp > plane_state->color_plane[0].stride) {
                        if (offset == 0) {
                                DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
                                return -EINVAL;
                        }

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }
        }

        /*
         * CCS AUX surface doesn't have its own x/y offsets, we must make sure
         * they match with the main surface x/y offsets.
         */
        if (is_ccs_modifier(fb->modifier)) {
                while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
                        if (offset == 0)
                                break;

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }

                if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
                        DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
                        return -EINVAL;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = x;
        plane_state->color_plane[0].y = y;

        /*
         * Put the final coordinates back so that the src
         * coordinate checks will see the right values.
         */
        drm_rect_translate_to(&plane_state->uapi.src,
                              x << 16, y << 16);

        return 0;
}
3509
/*
 * Compute color_plane[1] (the CbCr surface) offset/x/y for YUV
 * semi-planar formats. src coordinates are 16.16 fixed point; the
 * >> 17 is >> 16 plus a halving for the 2x subsampled chroma plane.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        int max_width = skl_max_plane_width(fb, 1, rotation);
        int max_height = 4096;
        int x = plane_state->uapi.src.x1 >> 17;
        int y = plane_state->uapi.src.y1 >> 17;
        int w = drm_rect_width(&plane_state->uapi.src) >> 17;
        int h = drm_rect_height(&plane_state->uapi.src) >> 17;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

        /* FIXME not quite sure how/if these apply to the chroma plane */
        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        plane_state->color_plane[1].offset = offset;
        plane_state->color_plane[1].x = x;
        plane_state->color_plane[1].y = y;

        return 0;
}
3538
/*
 * Compute color_plane[1] (the CCS AUX surface) offset/x/y. The AUX
 * surface is subsampled by hsub x vsub relative to the main surface;
 * the sub-sample remainder of the main x/y is folded back in so the
 * stored coordinates stay in main-surface units.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int src_x = plane_state->uapi.src.x1 >> 16;
        int src_y = plane_state->uapi.src.y1 >> 16;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int x = src_x / hsub;
        int y = src_y / vsub;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

        plane_state->color_plane[1].offset = offset;
        plane_state->color_plane[1].x = x * hsub + src_x % hsub;
        plane_state->color_plane[1].y = y * vsub + src_y % vsub;

        return 0;
}
3559
/*
 * Compute and validate all color plane offsets/coordinates for a skl+
 * plane: GTT mapping first, then the AUX surface (NV12 chroma or CCS),
 * then the main surface which depends on the AUX result.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int ret;

        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
                return ret;

        /* Nothing more to compute for an invisible plane. */
        if (!plane_state->uapi.visible)
                return 0;

        /*
         * Handle the AUX surface first since
         * the main surface setup depends on it.
         */
        if (drm_format_info_is_yuv_semiplanar(fb->format)) {
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else if (is_ccs_modifier(fb->modifier)) {
                ret = skl_check_ccs_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else {
                /*
                 * No AUX surface: park the offset at a huge sentinel so
                 * skl_check_main_surface()'s "offset > aux_offset"
                 * adjustment never kicks in.
                 */
                plane_state->color_plane[1].offset = ~0xfff;
                plane_state->color_plane[1].x = 0;
                plane_state->color_plane[1].y = 0;
        }

        ret = skl_check_main_surface(plane_state);
        if (ret)
                return ret;

        return 0;
}
3596
3597 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
3598                              const struct intel_plane_state *plane_state,
3599                              unsigned int *num, unsigned int *den)
3600 {
3601         const struct drm_framebuffer *fb = plane_state->hw.fb;
3602         unsigned int cpp = fb->format->cpp[0];
3603
3604         /*
3605          * g4x bspec says 64bpp pixel rate can't exceed 80%
3606          * of cdclk when the sprite plane is enabled on the
3607          * same pipe. ilk/snb bspec says 64bpp pixel rate is
3608          * never allowed to exceed 80% of cdclk. Let's just go
3609          * with the ilk/snb limit always.
3610          */
3611         if (cpp == 8) {
3612                 *num = 10;
3613                 *den = 8;
3614         } else {
3615                 *num = 1;
3616                 *den = 1;
3617         }
3618 }
3619
3620 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
3621                                 const struct intel_plane_state *plane_state)
3622 {
3623         unsigned int pixel_rate;
3624         unsigned int num, den;
3625
3626         /*
3627          * Note that crtc_state->pixel_rate accounts for both
3628          * horizontal and vertical panel fitter downscaling factors.
3629          * Pre-HSW bspec tells us to only consider the horizontal
3630          * downscaling factor here. We ignore that and just consider
3631          * both for simplicity.
3632          */
3633         pixel_rate = crtc_state->pixel_rate;
3634
3635         i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
3636
3637         /* two pixels per clock with double wide pipe */
3638         if (crtc_state->double_wide)
3639                 den *= 2;
3640
3641         return DIV_ROUND_UP(pixel_rate * num, den);
3642 }
3643
3644 unsigned int
3645 i9xx_plane_max_stride(struct intel_plane *plane,
3646                       u32 pixel_format, u64 modifier,
3647                       unsigned int rotation)
3648 {
3649         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3650
3651         if (!HAS_GMCH(dev_priv)) {
3652                 return 32*1024;
3653         } else if (INTEL_GEN(dev_priv) >= 4) {
3654                 if (modifier == I915_FORMAT_MOD_X_TILED)
3655                         return 16*1024;
3656                 else
3657                         return 32*1024;
3658         } else if (INTEL_GEN(dev_priv) >= 3) {
3659                 if (modifier == I915_FORMAT_MOD_X_TILED)
3660                         return 8*1024;
3661                 else
3662                         return 16*1024;
3663         } else {
3664                 if (plane->i9xx_plane == PLANE_C)
3665                         return 4*1024;
3666                 else
3667                         return 8*1024;
3668         }
3669 }
3670
/*
 * Compute the DSPCNTR bits that depend on crtc state rather than plane
 * state: gamma/CSC enables, plus pipe selection on gen4 and earlier.
 */
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 dspcntr = 0;

        if (crtc_state->gamma_enable)
                dspcntr |= DISPPLANE_GAMMA_ENABLE;

        if (crtc_state->csc_enable)
                dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

        /* Pipe select is only programmed before gen5. */
        if (INTEL_GEN(dev_priv) < 5)
                dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

        return dspcntr;
}
3688
/*
 * Compute the plane-state dependent DSPCNTR bits for a pre-skl primary
 * plane: enable bit, pixel format, tiling and rotation/reflection.
 * The crtc-dependent bits come from i9xx_plane_ctl_crtc().
 *
 * Returns 0 if the fb format is not handled (logged via MISSING_CASE()).
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        u32 dspcntr;

        dspcntr = DISPLAY_PLANE_ENABLE;

        /* NOTE(review): trickle feed disable on these platforms only -
         * rationale presumably lives in bspec, confirm there. */
        if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
            IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

        /* Map the DRM fourcc to the hardware pixel format field. */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                dspcntr |= DISPPLANE_8BPP;
                break;
        case DRM_FORMAT_XRGB1555:
                dspcntr |= DISPPLANE_BGRX555;
                break;
        case DRM_FORMAT_RGB565:
                dspcntr |= DISPPLANE_BGRX565;
                break;
        case DRM_FORMAT_XRGB8888:
                dspcntr |= DISPPLANE_BGRX888;
                break;
        case DRM_FORMAT_XBGR8888:
                dspcntr |= DISPPLANE_RGBX888;
                break;
        case DRM_FORMAT_XRGB2101010:
                dspcntr |= DISPPLANE_BGRX101010;
                break;
        case DRM_FORMAT_XBGR2101010:
                dspcntr |= DISPPLANE_RGBX101010;
                break;
        case DRM_FORMAT_XBGR16161616F:
                dspcntr |= DISPPLANE_RGBX161616;
                break;
        default:
                MISSING_CASE(fb->format->format);
                return 0;
        }

        /* X-tiling flag only exists from gen4 on. */
        if (INTEL_GEN(dev_priv) >= 4 &&
            fb->modifier == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;

        if (rotation & DRM_MODE_ROTATE_180)
                dspcntr |= DISPPLANE_ROTATE_180;

        if (rotation & DRM_MODE_REFLECT_X)
                dspcntr |= DISPPLANE_MIRROR;

        return dspcntr;
}
3746
/*
 * Compute the final color_plane[0] offset/x/y for a pre-skl primary
 * plane from the clipped uapi src rectangle (16.16 fixed point),
 * including the manual rotation/mirror coordinate inversion required
 * before HSW/BDW.
 *
 * Returns 0 on success or a negative error code.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int src_x, src_y, src_w;
        u32 offset;
        int ret;

        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
                return ret;

        if (!plane_state->uapi.visible)
                return 0;

        src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;

        /* Undocumented hardware limit on i965/g4x/vlv/chv */
        if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
                return -EINVAL;

        intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

        /* Pre-gen4 has no surface offset register; linear offset is used. */
        if (INTEL_GEN(dev_priv) >= 4)
                offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
                                                            plane_state, 0);
        else
                offset = 0;

        /*
         * Put the final coordinates back so that the src
         * coordinate checks will see the right values.
         */
        drm_rect_translate_to(&plane_state->uapi.src,
                              src_x << 16, src_y << 16);

        /* HSW/BDW do this automagically in hardware */
        if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
                unsigned int rotation = plane_state->hw.rotation;
                /* NOTE(review): these shadow the outer src_w; the values
                 * match since the translate above preserves width/height. */
                int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
                int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

                /* Scanout starts from the last pixel when rotated/mirrored. */
                if (rotation & DRM_MODE_ROTATE_180) {
                        src_x += src_w - 1;
                        src_y += src_h - 1;
                } else if (rotation & DRM_MODE_REFLECT_X) {
                        src_x += src_w - 1;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = src_x;
        plane_state->color_plane[0].y = src_y;

        return 0;
}
3806
3807 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
3808 {
3809         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3810         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3811
3812         if (IS_CHERRYVIEW(dev_priv))
3813                 return i9xx_plane == PLANE_B;
3814         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3815                 return false;
3816         else if (IS_GEN(dev_priv, 4))
3817                 return i9xx_plane == PLANE_C;
3818         else
3819                 return i9xx_plane == PLANE_B ||
3820                         i9xx_plane == PLANE_C;
3821 }
3822
/*
 * Atomic check hook for pre-skl primary planes: validate rotation,
 * clip the plane (no scaling supported), compute the surface
 * coordinates and the DSPCNTR value for the commit phase.
 *
 * Returns 0 on success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
                 struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        int ret;

        ret = chv_plane_check_rotation(plane_state);
        if (ret)
                return ret;

        /* Clip to the crtc; scaling is not supported on these planes. */
        ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
                                                  &crtc_state->uapi,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  i9xx_plane_has_windowing(plane),
                                                  true);
        if (ret)
                return ret;

        ret = i9xx_check_plane_surface(plane_state);
        if (ret)
                return ret;

        /* Fully clipped plane: nothing further to program. */
        if (!plane_state->uapi.visible)
                return 0;

        ret = intel_plane_check_src_coordinates(plane_state);
        if (ret)
                return ret;

        plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

        return 0;
}
3858
/*
 * Program a pre-skl primary plane from state precomputed by
 * i9xx_plane_check(). Writes stride, window position/size (where the
 * hardware has a window generator), tile/linear offsets, and finally
 * the control register followed by the surface address. All register
 * writes happen under the uncore lock so the update is as atomic as
 * the hardware allows; no validation is repeated here.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine precomputed plane bits with the crtc-dependent bits. */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen4+ has a surface offset register; gen2/3 only has DSPADDR. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV PLANE_B has its own window generator (see
		 * i9xx_plane_has_windowing()); constant alpha is forced off. */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3929
/*
 * Turn off a pre-skl primary plane while keeping the crtc-dependent
 * DSPCNTR bits programmed (see the comment below for why). The
 * control register is written before the surface/address register,
 * mirroring the ordering used in i9xx_update_plane().
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3960
/*
 * Read back from hardware whether the plane is enabled.
 *
 * @pipe is written on success: gen5+ planes are fixed to their pipe,
 * while older parts decode the pipe from the DSPCNTR pipe-select
 * bits. Returns false (plane treated as off) without touching @pipe
 * when the power domain is down and the register can't be read.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3995
3996 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3997 {
3998         struct drm_device *dev = intel_crtc->base.dev;
3999         struct drm_i915_private *dev_priv = to_i915(dev);
4000
4001         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4002         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4003         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4004 }
4005
4006 /*
4007  * This function detaches (aka. unbinds) unused scalers in hardware
4008  */
4009 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4010 {
4011         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4012         const struct intel_crtc_scaler_state *scaler_state =
4013                 &crtc_state->scaler_state;
4014         int i;
4015
4016         /* loop through and disable scalers that aren't in use */
4017         for (i = 0; i < intel_crtc->num_scalers; i++) {
4018                 if (!scaler_state->scalers[i].in_use)
4019                         skl_detach_scaler(intel_crtc, i);
4020         }
4021 }
4022
4023 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
4024                                           int color_plane, unsigned int rotation)
4025 {
4026         /*
4027          * The stride is either expressed as a multiple of 64 bytes chunks for
4028          * linear buffers or in number of tiles for tiled buffers.
4029          */
4030         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
4031                 return 64;
4032         else if (drm_rotation_90_or_270(rotation))
4033                 return intel_tile_height(fb, color_plane);
4034         else
4035                 return intel_tile_width_bytes(fb, color_plane);
4036 }
4037
4038 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4039                      int color_plane)
4040 {
4041         const struct drm_framebuffer *fb = plane_state->hw.fb;
4042         unsigned int rotation = plane_state->hw.rotation;
4043         u32 stride = plane_state->color_plane[color_plane].stride;
4044
4045         if (color_plane >= fb->format->num_planes)
4046                 return 0;
4047
4048         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4049 }
4050
/*
 * Translate a DRM fourcc pixel format into the PLANE_CTL format and
 * channel-ordering bits. Unsupported formats log a MISSING_CASE and
 * fall back to 0; callers are expected to have filtered the format
 * list beforehand.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4108
4109 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4110 {
4111         if (!plane_state->hw.fb->format->has_alpha)
4112                 return PLANE_CTL_ALPHA_DISABLE;
4113
4114         switch (plane_state->hw.pixel_blend_mode) {
4115         case DRM_MODE_BLEND_PIXEL_NONE:
4116                 return PLANE_CTL_ALPHA_DISABLE;
4117         case DRM_MODE_BLEND_PREMULTI:
4118                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4119         case DRM_MODE_BLEND_COVERAGE:
4120                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4121         default:
4122                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4123                 return PLANE_CTL_ALPHA_DISABLE;
4124         }
4125 }
4126
4127 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4128 {
4129         if (!plane_state->hw.fb->format->has_alpha)
4130                 return PLANE_COLOR_ALPHA_DISABLE;
4131
4132         switch (plane_state->hw.pixel_blend_mode) {
4133         case DRM_MODE_BLEND_PIXEL_NONE:
4134                 return PLANE_COLOR_ALPHA_DISABLE;
4135         case DRM_MODE_BLEND_PREMULTI:
4136                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4137         case DRM_MODE_BLEND_COVERAGE:
4138                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4139         default:
4140                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4141                 return PLANE_COLOR_ALPHA_DISABLE;
4142         }
4143 }
4144
/*
 * Translate a framebuffer modifier into the PLANE_CTL tiling bits,
 * including the render decompression enable for the CCS modifiers.
 * Linear maps to 0; unknown modifiers log a MISSING_CASE and also
 * return 0.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4166
4167 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4168 {
4169         switch (rotate) {
4170         case DRM_MODE_ROTATE_0:
4171                 break;
4172         /*
4173          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4174          * while i915 HW rotation is clockwise, thats why this swapping.
4175          */
4176         case DRM_MODE_ROTATE_90:
4177                 return PLANE_CTL_ROTATE_270;
4178         case DRM_MODE_ROTATE_180:
4179                 return PLANE_CTL_ROTATE_180;
4180         case DRM_MODE_ROTATE_270:
4181                 return PLANE_CTL_ROTATE_90;
4182         default:
4183                 MISSING_CASE(rotate);
4184         }
4185
4186         return 0;
4187 }
4188
/*
 * Translate a DRM_MODE_REFLECT_* bit into PLANE_CTL flip bits.
 * Only horizontal reflection is handled; DRM_MODE_REFLECT_Y
 * deliberately falls through to the MISSING_CASE complaint.
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
		/* fall through - Y reflection is not supported here */
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}
4203
4204 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4205 {
4206         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4207         u32 plane_ctl = 0;
4208
4209         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4210                 return plane_ctl;
4211
4212         if (crtc_state->gamma_enable)
4213                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4214
4215         if (crtc_state->csc_enable)
4216                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4217
4218         return plane_ctl;
4219 }
4220
/*
 * Compute the full PLANE_CTL value for a plane update from the
 * precomputed plane state: format, tiling, rotation/flip, blending
 * (pre-glk only) and color keying. The crtc-dependent bits are added
 * separately via skl_plane_ctl_crtc().
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* On glk/gen10+ alpha and YUV CSC moved to PLANE_COLOR_CTL. */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal mirroring only exists on gen10+. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4259
4260 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4261 {
4262         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4263         u32 plane_color_ctl = 0;
4264
4265         if (INTEL_GEN(dev_priv) >= 11)
4266                 return plane_color_ctl;
4267
4268         if (crtc_state->gamma_enable)
4269                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4270
4271         if (crtc_state->csc_enable)
4272                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4273
4274         return plane_color_ctl;
4275 }
4276
/*
 * Compute the PLANE_COLOR_CTL value (glk+) for a plane update:
 * per-plane gamma disable, the alpha blend mode, and - for YUV
 * framebuffers - either one of the fixed YUV->RGB CSC modes or, on
 * icl HDR-capable planes, the programmable input CSC.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		/* Pick the fixed CSC matching the fb's encoding/range. */
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR planes use the explicitly programmed input CSC. */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4303
/*
 * Restore the display after a reset: re-take hardware state
 * ownership, then (if @state is non-NULL) commit the atomic state
 * that was duplicated before the reset. All modeset locks must
 * already be held via @ctx, which is why -EDEADLK from the commit
 * would be unexpected. Returns the commit result, or 0 when there
 * is no state to restore.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
4342
4343 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4344 {
4345         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4346                 intel_has_gpu_reset(&dev_priv->gt));
4347 }
4348
/*
 * Quiesce the display before a GPU reset that clobbers it: flag the
 * reset via I915_RESET_MODESET, take every modeset lock, snapshot
 * the current atomic state into dev_priv->modeset_restore_state and
 * disable all crtcs. The modeset locks are deliberately left held on
 * return (including the error paths) - intel_finish_reset() restores
 * the state and drops them.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until it no longer deadlocks. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Handed over to intel_finish_reset() for restoration. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4405
/*
 * Counterpart of intel_prepare_reset(): restore the display state
 * snapshotted before the reset, then drop the modeset locks taken
 * there. No-op unless intel_prepare_reset() flagged the reset as
 * touching the display via I915_RESET_MODESET.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts before restoring the state. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4456
/*
 * Apply the icl PIPE_CHICKEN workarounds (WA #1153 and
 * WA #1605353570) with a read-modify-write so any other chicken
 * bits already programmed are preserved.
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = I915_READ(PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
4480
/*
 * Enable Transcoder Port Sync on a slave transcoder and point it at
 * its master. No-op for crtcs that are not port sync slaves
 * (master_transcoder == INVALID_TRANSCODER). The hardware encodes
 * the EDP transcoder as master select 0, with the other transcoders
 * shifted up by one.
 */
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 trans_ddi_func_ctl2_val;
	u8 master_select;

	/*
	 * Configure the master select and enable Transcoder Port Sync for
	 * Slave CRTCs transcoder.
	 */
	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
		return;

	if (crtc_state->master_transcoder == TRANSCODER_EDP)
		master_select = 0;
	else
		master_select = crtc_state->master_transcoder + 1;

	/* Set the master select bits for Tranascoder Port Sync */
	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
	/* Enable Transcoder Port Sync */
	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;

	I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
		   trans_ddi_func_ctl2_val);
}
4510
4511 static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
4512 {
4513         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4514         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4515         i915_reg_t reg;
4516         u32 trans_ddi_func_ctl2_val;
4517
4518         if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
4519                 return;
4520
4521         DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
4522                       transcoder_name(old_crtc_state->cpu_transcoder));
4523
4524         reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
4525         trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
4526                                     PORT_SYNC_MODE_MASTER_SELECT_MASK);
4527         I915_WRITE(reg, trans_ddi_func_ctl2_val);
4528 }
4529
/*
 * Switch the FDI link from the training patterns to the normal pixel
 * stream once link training has completed, enabling enhanced framing
 * on both TX and RX (and error correction on IVB).
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT uses a different pattern field layout than plain ilk. */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4570
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs the two-pattern FDI training handshake: enable CPU FDI TX and
 * PCH FDI RX with training pattern 1, poll FDI_RX_IIR for bit lock,
 * switch both sides to pattern 2, then poll for symbol lock. Each
 * poll retries up to 5 times; failures are only logged, not
 * propagated to the caller.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; ack it by writing the status bit back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock; ack it by writing the status bit back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4664
/*
 * FDI TX voltage-swing / pre-emphasis levels for SNB B stepping,
 * tried in ascending order by the link training loops below until
 * the RX reports bit/symbol lock.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4671
/*
 * The FDI link training function for SNB/Cougarpoint.
 *
 * Runs the two-pattern FDI training sequence: pattern 1 until the RX
 * reports bit lock, then pattern 2 until it reports symbol lock,
 * walking the four vswing/pre-emphasis levels in snb_b_fdi_train_param
 * at each stage.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT encodes the train pattern in a different field than IBX */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/pre-emphasis table until bit lock is reported */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		/* Poll the RX IIR a few times per level before giving up */
		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* ack the status bit by writing it back */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same vswing/pre-emphasis walk, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4804
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Unlike the SNB path, each vswing/pre-emphasis level is tried twice
 * (j counts 0..2*ARRAY_SIZE-1, level = j/2) and the whole TX/RX pair
 * is disabled and re-enabled before every attempt.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* re-read IIR in case the lock bit landed between
			 * the first read and the check */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4924
/* Bring up the FDI RX and TX PLLs and switch the RX to PCDclk. */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* mirror the PIPECONF BPC field into FDI_RX_CTL bits 18:16 */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4961
/* Switch FDI RX back to Rawclk and shut down the FDI TX and RX PLLs. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Disable PCH FDI RX PLL last */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4991
/*
 * Disable CPU FDI TX and PCH FDI RX, then leave both sides parked in
 * training pattern 1 (the state link training expects next time).
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* keep FDI RX BPC (bits 18:16) in sync with PIPECONF */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT encodes the train pattern in a different field than IBX */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
5044
5045 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5046 {
5047         struct drm_crtc *crtc;
5048         bool cleanup_done;
5049
5050         drm_for_each_crtc(crtc, &dev_priv->drm) {
5051                 struct drm_crtc_commit *commit;
5052                 spin_lock(&crtc->commit_lock);
5053                 commit = list_first_entry_or_null(&crtc->commit_list,
5054                                                   struct drm_crtc_commit, commit_entry);
5055                 cleanup_done = commit ?
5056                         try_wait_for_completion(&commit->cleanup_done) : true;
5057                 spin_unlock(&crtc->commit_lock);
5058
5059                 if (cleanup_done)
5060                         continue;
5061
5062                 drm_crtc_wait_one_vblank(crtc);
5063
5064                 return true;
5065         }
5066
5067         return false;
5068 }
5069
5070 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
5071 {
5072         u32 temp;
5073
5074         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
5075
5076         mutex_lock(&dev_priv->sb_lock);
5077
5078         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5079         temp |= SBI_SSCCTL_DISABLE;
5080         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5081
5082         mutex_unlock(&dev_priv->sb_lock);
5083 }
5084
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* the modulator must be off while its dividers are reprogrammed */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		/* divsel holds the integer part minus 2; phaseinc the
		 * fractional part in 1/64 steps */
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5163
5164 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5165 {
5166         u32 divsel, phaseinc, auxdiv;
5167         u32 iclk_virtual_root_freq = 172800 * 1000;
5168         u32 iclk_pi_range = 64;
5169         u32 desired_divisor;
5170         u32 temp;
5171
5172         if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5173                 return 0;
5174
5175         mutex_lock(&dev_priv->sb_lock);
5176
5177         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5178         if (temp & SBI_SSCCTL_DISABLE) {
5179                 mutex_unlock(&dev_priv->sb_lock);
5180                 return 0;
5181         }
5182
5183         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5184         divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5185                 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5186         phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5187                 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5188
5189         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5190         auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5191                 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5192
5193         mutex_unlock(&dev_priv->sb_lock);
5194
5195         desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5196
5197         return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5198                                  desired_divisor << auxdiv);
5199 }
5200
/* Copy the CPU transcoder h/v timing registers into the PCH transcoder. */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
						enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Horizontal timings */
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	/* Vertical timings */
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
5224
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * No-op if the bit already matches the requested state.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* sanity check: FDI B and C RX must both be disabled before
	 * flipping the bifurcation bit */
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
5244
5245 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5246 {
5247         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5248         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5249
5250         switch (crtc->pipe) {
5251         case PIPE_A:
5252                 break;
5253         case PIPE_B:
5254                 if (crtc_state->fdi_lanes > 2)
5255                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5256                 else
5257                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5258
5259                 break;
5260         case PIPE_C:
5261                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5262
5263                 break;
5264         default:
5265                 BUG();
5266         }
5267 }
5268
5269 /*
5270  * Finds the encoder associated with the given CRTC. This can only be
5271  * used when we know that the CRTC isn't feeding multiple encoders!
5272  */
5273 static struct intel_encoder *
5274 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5275                            const struct intel_crtc_state *crtc_state)
5276 {
5277         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5278         const struct drm_connector_state *connector_state;
5279         const struct drm_connector *connector;
5280         struct intel_encoder *encoder = NULL;
5281         int num_encoders = 0;
5282         int i;
5283
5284         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5285                 if (connector_state->crtc != &crtc->base)
5286                         continue;
5287
5288                 encoder = to_intel_encoder(connector_state->best_encoder);
5289                 num_encoders++;
5290         }
5291
5292         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5293              num_encoders, pipe_name(crtc->pipe));
5294
5295         return encoder;
5296 }
5297
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		/* select DPLL B iff that's the shared dpll assigned to us */
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		/* BPC field sits at bits 7:5 of PIPECONF */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(crtc_state);
}
5389
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the
 * transcoder timings, then enable the PCH transcoder. Only the PIPE_A
 * PCH transcoder slot is used on LPT.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5406
5407 static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
5408 {
5409         struct drm_i915_private *dev_priv = to_i915(dev);
5410         i915_reg_t dslreg = PIPEDSL(pipe);
5411         u32 temp;
5412
5413         temp = I915_READ(dslreg);
5414         udelay(500);
5415         if (wait_for(I915_READ(dslreg) != temp, 5)) {
5416                 if (wait_for(I915_READ(dslreg) != temp, 5))
5417                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5418         }
5419 }
5420
5421 /*
5422  * The hardware phase 0.0 refers to the center of the pixel.
5423  * We want to start from the top/left edge which is phase
5424  * -0.5. That matches how the hardware calculates the scaling
5425  * factors (from top-left of the first pixel to bottom-right
5426  * of the last pixel, as opposed to the pixel centers).
5427  *
5428  * For 4:2:0 subsampled chroma planes we obviously have to
5429  * adjust that so that the chroma sample position lands in
5430  * the right spot.
5431  *
5432  * Note that for packed YCbCr 4:2:2 formats there is no way to
5433  * control chroma siting. The hardware simply replicates the
5434  * chroma samples for both of the luma samples, and thus we don't
5435  * actually get the expected MPEG2 chroma siting convention :(
5436  * The same behaviour is observed on pre-SKL platforms as well.
5437  *
5438  * Theory behind the formula (note that we ignore sub-pixel
5439  * source coordinates):
5440  * s = source sample position
5441  * d = destination sample position
5442  *
5443  * Downscaling 4:1:
5444  * -0.5
5445  * | 0.0
5446  * | |     1.5 (initial phase)
5447  * | |     |
5448  * v v     v
5449  * | s | s | s | s |
5450  * |       d       |
5451  *
5452  * Upscaling 1:4:
5453  * -0.5
5454  * | -0.375 (initial phase)
5455  * | |     0.0
5456  * | |     |
5457  * v v     v
5458  * |       s       |
5459  * | d | d | d | d |
5460  */
5461 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5462 {
5463         int phase = -0x8000;
5464         u16 trip = 0;
5465
5466         if (chroma_cosited)
5467                 phase += (sub - 1) * 0x8000 / sub;
5468
5469         phase += scale / (2 * sub);
5470
5471         /*
5472          * Hardware initial phase limited to [-0.5:1.5].
5473          * Since the max hardware scale factor is 3.0, we
5474          * should never actually excdeed 1.0 here.
5475          */
5476         WARN_ON(phase < -0x8000 || phase > 0x18000);
5477
5478         if (phase < 0)
5479                 phase = 0x10000 + phase;
5480         else
5481                 trip = PS_PHASE_TRIP;
5482
5483         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5484 }
5485
5486 #define SKL_MIN_SRC_W 8
5487 #define SKL_MAX_SRC_W 4096
5488 #define SKL_MIN_SRC_H 8
5489 #define SKL_MAX_SRC_H 4096
5490 #define SKL_MIN_DST_W 8
5491 #define SKL_MAX_DST_W 4096
5492 #define SKL_MIN_DST_H 8
5493 #define SKL_MAX_DST_H 4096
5494 #define ICL_MAX_SRC_W 5120
5495 #define ICL_MAX_SRC_H 4096
5496 #define ICL_MAX_DST_W 5120
5497 #define ICL_MAX_DST_H 4096
5498 #define SKL_MIN_YUV_420_SRC_W 16
5499 #define SKL_MIN_YUV_420_SRC_H 16
5500
/*
 * skl_update_scaler - stage a scaler (de)allocation in @crtc_state.
 * @crtc_state: crtc state tracking the scaler users
 * @force_detach: free the scaler regardless of scaling needs
 * @scaler_user: index identifying the user (plane index or SKL_CRTC_INDEX)
 * @scaler_id: in/out; scaler id currently bound to this user (-1 if none)
 * @src_w: source width
 * @src_h: source height
 * @dst_w: destination width
 * @dst_h: destination height
 * @format: source pixel format (NULL for pipe/panel-fitter scaling)
 * @need_scaler: caller already determined that a scaler is required
 *
 * Only the bookkeeping in crtc_state->scaler_state is updated here; the
 * actual scaler registers are programmed later during plane/panel-fitter
 * programming.
 *
 * Returns 0 on success, -EINVAL when the requested scaling is out of range
 * or otherwise unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  const struct drm_format_info *format, bool need_scaler)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        if (src_w != dst_w || src_h != dst_h)
                need_scaler = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
            need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler binded to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaler) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* Planar YUV sources have a larger minimum size than other formats. */
        if (format && drm_format_info_is_yuv_semiplanar(format) &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (INTEL_GEN(dev_priv) >= 11 &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (INTEL_GEN(dev_priv) < 11 &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                scaler_state->scaler_users);

        return 0;
}
5589
5590 /**
5591  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5592  *
5593  * @state: crtc's scaler state
5594  *
5595  * Return
5596  *     0 - scaler_usage updated successfully
5597  *    error - requested scaling cannot be supported or other error condition
5598  */
5599 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5600 {
5601         const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
5602         bool need_scaler = false;
5603
5604         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5605                 need_scaler = true;
5606
5607         return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
5608                                  &state->scaler_state.scaler_id,
5609                                  state->pipe_src_w, state->pipe_src_h,
5610                                  adjusted_mode->crtc_hdisplay,
5611                                  adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5612 }
5613
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state in which the plane's scaler usage is tracked
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
                                   struct intel_plane_state *plane_state)
{
        struct intel_plane *intel_plane =
                to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
        struct drm_framebuffer *fb = plane_state->hw.fb;
        int ret;
        /* Release any scaler when the plane has no fb or is invisible. */
        bool force_detach = !fb || !plane_state->uapi.visible;
        bool need_scaler = false;

        /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
        if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
            fb && drm_format_info_is_yuv_semiplanar(fb->format))
                need_scaler = true;

        /* src coordinates are 16.16 fixed point, hence the >> 16. */
        ret = skl_update_scaler(crtc_state, force_detach,
                                drm_plane_index(&intel_plane->base),
                                &plane_state->scaler_id,
                                drm_rect_width(&plane_state->uapi.src) >> 16,
                                drm_rect_height(&plane_state->uapi.src) >> 16,
                                drm_rect_width(&plane_state->uapi.dst),
                                drm_rect_height(&plane_state->uapi.dst),
                                fb ? fb->format : NULL, need_scaler);

        /* No scaler assigned (or error): no further restrictions to check. */
        if (ret || plane_state->scaler_id < 0)
                return ret;

        /* check colorkey */
        if (plane_state->ckey.flags) {
                DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
                              intel_plane->base.base.id,
                              intel_plane->base.name);
                return -EINVAL;
        }

        /* Check src format */
        switch (fb->format->format) {
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_VYUY:
        case DRM_FORMAT_NV12:
        case DRM_FORMAT_P010:
        case DRM_FORMAT_P012:
        case DRM_FORMAT_P016:
        case DRM_FORMAT_Y210:
        case DRM_FORMAT_Y212:
        case DRM_FORMAT_Y216:
        case DRM_FORMAT_XVYU2101010:
        case DRM_FORMAT_XVYU12_16161616:
        case DRM_FORMAT_XVYU16161616:
                break;
        case DRM_FORMAT_XBGR16161616F:
        case DRM_FORMAT_ABGR16161616F:
        case DRM_FORMAT_XRGB16161616F:
        case DRM_FORMAT_ARGB16161616F:
                /* FP16 formats can only be scaled on gen11+. */
                if (INTEL_GEN(dev_priv) >= 11)
                        break;
                /* fall through */
        default:
                DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
                              intel_plane->base.base.id, intel_plane->base.name,
                              fb->base.id, fb->format->format);
                return -EINVAL;
        }

        return 0;
}
5699
5700 static void skylake_scaler_disable(struct intel_crtc *crtc)
5701 {
5702         int i;
5703
5704         for (i = 0; i < crtc->num_scalers; i++)
5705                 skl_detach_scaler(crtc, i);
5706 }
5707
/*
 * skylake_pfit_enable - program panel fitting through a pipe scaler (SKL+).
 *
 * Panel fitting here is done with one of the pipe scalers, so a scaler id
 * must already have been assigned in the crtc scaler state.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;

        if (crtc_state->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int pfit_w, pfit_h, hscale, vscale;
                int id;

                /* Panel fitting needs a scaler; bail if none was allocated. */
                if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
                        return;

                /* pch_pfit.size packs width in the high and height in the low 16 bits. */
                pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
                pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

                /* .16 fixed-point scale factors from pipe source to pfit window. */
                hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
                vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

                uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                /*
                 * NOTE(review): the phase registers use I915_WRITE_FW while
                 * the surrounding writes use I915_WRITE — confirm this mix of
                 * raw and managed register accessors is intentional.
                 */
                I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
        }
}
5744
/*
 * ironlake_pfit_enable - program the ILK-style panel fitter for @crtc_state.
 */
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (crtc_state->pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
                if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
                        /* IVB/HSW additionally select the pipe in PF_CTL. */
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
                                                 PF_PIPE_SEL_IVB(pipe));
                else
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
                I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
        }
}
5765
/*
 * hsw_enable_ips - enable IPS if @crtc_state requests it.
 *
 * On Broadwell IPS is toggled through the pcode mailbox; otherwise the
 * IPS_CTL register is written directly and we wait for it to stick.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
5801
/*
 * hsw_disable_ips - disable IPS if it is enabled in @crtc_state.
 *
 * Mirror of hsw_enable_ips(): pcode mailbox on Broadwell, direct IPS_CTL
 * write otherwise.  Always waits for a vblank afterwards so that planes
 * may safely be disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5828
5829 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5830 {
5831         if (intel_crtc->overlay)
5832                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5833
5834         /* Let userspace switch the overlay on again. In most cases userspace
5835          * has to recompute where to put it anyway.
5836          */
5837 }
5838
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 * @new_crtc_state: the enabling state
 *
 * Performs potentially sleeping operations that must be done after the
 * primary plane is enabled: re-enables gen2 FIFO underrun reporting and
 * manually checks for CPU/PCH underruns.  Note that this may be called due
 * to an explicit primary plane update, or due to an implicit re-enable that
 * is caused when a sprite plane is updated to no longer completely hide the
 * primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc,
                          const struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So don't enable underrun reporting before at least some planes
         * are enabled.
         * FIXME: Need to fix the logic to work when we turn off all planes
         * but leave the pipe running.
         */
        if (IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        /* Underruns don't always raise interrupts, so check manually. */
        intel_check_cpu_fifo_underruns(dev_priv);
        intel_check_pch_fifo_underruns(dev_priv);
}
5873
/* FIXME get rid of this and use pre_plane_update */
/*
 * Prepare for disabling the primary plane outside the atomic path:
 * quiesce gen2 underrun reporting, disable IPS, and on GMCH platforms
 * turn off memory self-refresh first (waiting a vblank so the plane
 * disable actually takes effect).
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        hsw_disable_ips(to_intel_crtc_state(crtc->state));

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
}
5905
5906 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5907                                        const struct intel_crtc_state *new_crtc_state)
5908 {
5909         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5910         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5911
5912         if (!old_crtc_state->ips_enabled)
5913                 return false;
5914
5915         if (needs_modeset(new_crtc_state))
5916                 return true;
5917
5918         /*
5919          * Workaround : Do not read or write the pipe palette/gamma data while
5920          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5921          *
5922          * Disable IPS before we program the LUT.
5923          */
5924         if (IS_HASWELL(dev_priv) &&
5925             (new_crtc_state->uapi.color_mgmt_changed ||
5926              new_crtc_state->update_pipe) &&
5927             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5928                 return true;
5929
5930         return !new_crtc_state->ips_enabled;
5931 }
5932
5933 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5934                                        const struct intel_crtc_state *new_crtc_state)
5935 {
5936         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5937         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5938
5939         if (!new_crtc_state->ips_enabled)
5940                 return false;
5941
5942         if (needs_modeset(new_crtc_state))
5943                 return true;
5944
5945         /*
5946          * Workaround : Do not read or write the pipe palette/gamma data while
5947          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5948          *
5949          * Re-enable IPS after the LUT has been programmed.
5950          */
5951         if (IS_HASWELL(dev_priv) &&
5952             (new_crtc_state->uapi.color_mgmt_changed ||
5953              new_crtc_state->update_pipe) &&
5954             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5955                 return true;
5956
5957         /*
5958          * We can't read out IPS on broadwell, assume the worst and
5959          * forcibly enable IPS on the first fastset.
5960          */
5961         if (new_crtc_state->update_pipe &&
5962             old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5963                 return true;
5964
5965         return !old_crtc_state->ips_enabled;
5966 }
5967
5968 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5969                           const struct intel_crtc_state *crtc_state)
5970 {
5971         if (!crtc_state->nv12_planes)
5972                 return false;
5973
5974         /* WA Display #0827: Gen9:all */
5975         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5976                 return true;
5977
5978         return false;
5979 }
5980
5981 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5982                                const struct intel_crtc_state *crtc_state)
5983 {
5984         /* Wa_2006604312:icl */
5985         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5986                 return true;
5987
5988         return false;
5989 }
5990
/*
 * intel_post_plane_update - work to do after the planes were (re)programmed.
 *
 * Flushes frontbuffer tracking, optionally updates watermarks, re-enables
 * IPS and the primary plane bookkeeping, and drops the NV12 / scaler clock
 * gating workarounds that the new state no longer needs.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        struct intel_crtc_state *pipe_config =
                intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
                                                crtc);
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(state, primary);

        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

        if (pipe_config->update_wm_post && pipe_config->hw.active)
                intel_update_watermarks(crtc);

        if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
                hsw_enable_ips(pipe_config);

        if (old_primary_state) {
                struct drm_plane_state *new_primary_state =
                        drm_atomic_get_new_plane_state(state, primary);

                intel_fbc_post_update(crtc);

                /* The primary plane just became visible: run post-enable work. */
                if (new_primary_state->visible &&
                    (needs_modeset(pipe_config) ||
                     !old_primary_state->visible))
                        intel_post_enable_primary(&crtc->base, pipe_config);
        }

        /* Display WA #0827: drop it when the new state no longer needs it. */
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config))
                skl_wa_827(dev_priv, crtc->pipe, false);

        /* Wa_2006604312:icl: likewise. */
        if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
            !needs_scalerclk_wa(dev_priv, pipe_config))
                icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
}
6032
/*
 * intel_pre_plane_update - work to do before the planes are (re)programmed.
 *
 * Disables IPS when required, prepares FBC and gen2 underrun reporting,
 * applies display workarounds the new state needs, turns off self-refresh
 * and LP watermarks where the hardware demands it, and programs the
 * 'intermediate' watermarks for non-modeset updates.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
                                   struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(state, primary);
        bool modeset = needs_modeset(pipe_config);
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(state);

        if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
                hsw_disable_ips(old_crtc_state);

        if (old_primary_state) {
                struct intel_plane_state *new_primary_state =
                        intel_atomic_get_new_plane_state(intel_state,
                                                         to_intel_plane(primary));

                intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So disable underrun reporting before all the planes get disabled.
                 */
                if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
                    (modeset || !new_primary_state->uapi.visible))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
        }

        /* Display WA 827 */
        if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
            needs_nv12_wa(dev_priv, pipe_config))
                skl_wa_827(dev_priv, crtc->pipe, true);

        /* Wa_2006604312:icl */
        if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
            needs_scalerclk_wa(dev_priv, pipe_config))
                icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
            pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
            old_crtc_state->hw.active)
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * If we're doing a modeset, we're done.  No need to do any pre-vblank
         * watermark programming here.
         */
        if (needs_modeset(pipe_config))
                return;

        /*
         * For platforms that support atomic watermarks, program the
         * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
         * will be the intermediate values that are safe for both pre- and
         * post- vblank; when vblank happens, the 'active' values will be set
         * to the final 'target' values and we'll do this again to get the
         * optimal watermarks.  For gen9+ platforms, the values we program here
         * will be the final target values which will get automatically latched
         * at vblank time; no further programming will be necessary.
         *
         * If a platform hasn't been transitioned to atomic watermarks yet,
         * we'll continue to update watermarks the old way, if flags tell
         * us to.
         */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(intel_state,
                                                     pipe_config);
        else if (pipe_config->update_wm_pre)
                intel_update_watermarks(crtc);
}
6126
6127 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6128                                       struct intel_crtc *crtc)
6129 {
6130         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6131         const struct intel_crtc_state *new_crtc_state =
6132                 intel_atomic_get_new_crtc_state(state, crtc);
6133         unsigned int update_mask = new_crtc_state->update_planes;
6134         const struct intel_plane_state *old_plane_state;
6135         struct intel_plane *plane;
6136         unsigned fb_bits = 0;
6137         int i;
6138
6139         intel_crtc_dpms_overlay_disable(crtc);
6140
6141         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6142                 if (crtc->pipe != plane->pipe ||
6143                     !(update_mask & BIT(plane->id)))
6144                         continue;
6145
6146                 intel_disable_plane(plane, new_crtc_state);
6147
6148                 if (old_plane_state->uapi.visible)
6149                         fb_bits |= plane->frontbuffer_bit;
6150         }
6151
6152         intel_frontbuffer_flip(dev_priv, fb_bits);
6153 }
6154
6155 /*
6156  * intel_connector_primary_encoder - get the primary encoder for a connector
6157  * @connector: connector for which to return the encoder
6158  *
6159  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6160  * all connectors to their encoder, except for DP-MST connectors which have
6161  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6162  * pointed to by as many DP-MST connectors as there are pipes.
6163  */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
        struct intel_encoder *encoder;

        /* DP-MST connectors map to their digital port's primary encoder. */
        if (connector->mst_port)
                return &dp_to_dig_port(connector->mst_port)->base;

        /* All other connectors have a single attached encoder. */
        encoder = intel_attached_encoder(&connector->base);
        WARN_ON(!encoder);

        return encoder;
}
6177
6178 static bool
6179 intel_connector_needs_modeset(struct intel_atomic_state *state,
6180                               const struct drm_connector_state *old_conn_state,
6181                               const struct drm_connector_state *new_conn_state)
6182 {
6183         struct intel_crtc *old_crtc = old_conn_state->crtc ?
6184                                       to_intel_crtc(old_conn_state->crtc) : NULL;
6185         struct intel_crtc *new_crtc = new_conn_state->crtc ?
6186                                       to_intel_crtc(new_conn_state->crtc) : NULL;
6187
6188         return new_crtc != old_crtc ||
6189                (new_crtc &&
6190                 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
6191 }
6192
6193 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6194 {
6195         struct drm_connector_state *old_conn_state;
6196         struct drm_connector_state *new_conn_state;
6197         struct drm_connector *conn;
6198         int i;
6199
6200         for_each_oldnew_connector_in_state(&state->base, conn,
6201                                            old_conn_state, new_conn_state, i) {
6202                 struct intel_encoder *encoder;
6203                 struct intel_crtc *crtc;
6204
6205                 if (!intel_connector_needs_modeset(state,
6206                                                    old_conn_state,
6207                                                    new_conn_state))
6208                         continue;
6209
6210                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6211                 if (!encoder->update_prepare)
6212                         continue;
6213
6214                 crtc = new_conn_state->crtc ?
6215                         to_intel_crtc(new_conn_state->crtc) : NULL;
6216                 encoder->update_prepare(state, encoder, crtc);
6217         }
6218 }
6219
6220 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6221 {
6222         struct drm_connector_state *old_conn_state;
6223         struct drm_connector_state *new_conn_state;
6224         struct drm_connector *conn;
6225         int i;
6226
6227         for_each_oldnew_connector_in_state(&state->base, conn,
6228                                            old_conn_state, new_conn_state, i) {
6229                 struct intel_encoder *encoder;
6230                 struct intel_crtc *crtc;
6231
6232                 if (!intel_connector_needs_modeset(state,
6233                                                    old_conn_state,
6234                                                    new_conn_state))
6235                         continue;
6236
6237                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6238                 if (!encoder->update_complete)
6239                         continue;
6240
6241                 crtc = new_conn_state->crtc ?
6242                         to_intel_crtc(new_conn_state->crtc) : NULL;
6243                 encoder->update_complete(state, encoder, crtc);
6244         }
6245 }
6246
6247 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
6248                                           struct intel_crtc_state *crtc_state,
6249                                           struct intel_atomic_state *state)
6250 {
6251         struct drm_connector_state *conn_state;
6252         struct drm_connector *conn;
6253         int i;
6254
6255         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6256                 struct intel_encoder *encoder =
6257                         to_intel_encoder(conn_state->best_encoder);
6258
6259                 if (conn_state->crtc != &crtc->base)
6260                         continue;
6261
6262                 if (encoder->pre_pll_enable)
6263                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6264         }
6265 }
6266
6267 static void intel_encoders_pre_enable(struct intel_crtc *crtc,
6268                                       struct intel_crtc_state *crtc_state,
6269                                       struct intel_atomic_state *state)
6270 {
6271         struct drm_connector_state *conn_state;
6272         struct drm_connector *conn;
6273         int i;
6274
6275         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6276                 struct intel_encoder *encoder =
6277                         to_intel_encoder(conn_state->best_encoder);
6278
6279                 if (conn_state->crtc != &crtc->base)
6280                         continue;
6281
6282                 if (encoder->pre_enable)
6283                         encoder->pre_enable(encoder, crtc_state, conn_state);
6284         }
6285 }
6286
6287 static void intel_encoders_enable(struct intel_crtc *crtc,
6288                                   struct intel_crtc_state *crtc_state,
6289                                   struct intel_atomic_state *state)
6290 {
6291         struct drm_connector_state *conn_state;
6292         struct drm_connector *conn;
6293         int i;
6294
6295         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6296                 struct intel_encoder *encoder =
6297                         to_intel_encoder(conn_state->best_encoder);
6298
6299                 if (conn_state->crtc != &crtc->base)
6300                         continue;
6301
6302                 if (encoder->enable)
6303                         encoder->enable(encoder, crtc_state, conn_state);
6304                 intel_opregion_notify_encoder(encoder, true);
6305         }
6306 }
6307
6308 static void intel_encoders_disable(struct intel_crtc *crtc,
6309                                    struct intel_crtc_state *old_crtc_state,
6310                                    struct intel_atomic_state *state)
6311 {
6312         struct drm_connector_state *old_conn_state;
6313         struct drm_connector *conn;
6314         int i;
6315
6316         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6317                 struct intel_encoder *encoder =
6318                         to_intel_encoder(old_conn_state->best_encoder);
6319
6320                 if (old_conn_state->crtc != &crtc->base)
6321                         continue;
6322
6323                 intel_opregion_notify_encoder(encoder, false);
6324                 if (encoder->disable)
6325                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6326         }
6327 }
6328
6329 static void intel_encoders_post_disable(struct intel_crtc *crtc,
6330                                         struct intel_crtc_state *old_crtc_state,
6331                                         struct intel_atomic_state *state)
6332 {
6333         struct drm_connector_state *old_conn_state;
6334         struct drm_connector *conn;
6335         int i;
6336
6337         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6338                 struct intel_encoder *encoder =
6339                         to_intel_encoder(old_conn_state->best_encoder);
6340
6341                 if (old_conn_state->crtc != &crtc->base)
6342                         continue;
6343
6344                 if (encoder->post_disable)
6345                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6346         }
6347 }
6348
6349 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
6350                                             struct intel_crtc_state *old_crtc_state,
6351                                             struct intel_atomic_state *state)
6352 {
6353         struct drm_connector_state *old_conn_state;
6354         struct drm_connector *conn;
6355         int i;
6356
6357         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6358                 struct intel_encoder *encoder =
6359                         to_intel_encoder(old_conn_state->best_encoder);
6360
6361                 if (old_conn_state->crtc != &crtc->base)
6362                         continue;
6363
6364                 if (encoder->post_pll_disable)
6365                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6366         }
6367 }
6368
6369 static void intel_encoders_update_pipe(struct intel_crtc *crtc,
6370                                        struct intel_crtc_state *crtc_state,
6371                                        struct intel_atomic_state *state)
6372 {
6373         struct drm_connector_state *conn_state;
6374         struct drm_connector *conn;
6375         int i;
6376
6377         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6378                 struct intel_encoder *encoder =
6379                         to_intel_encoder(conn_state->best_encoder);
6380
6381                 if (conn_state->crtc != &crtc->base)
6382                         continue;
6383
6384                 if (encoder->update_pipe)
6385                         encoder->update_pipe(encoder, crtc_state, conn_state);
6386         }
6387 }
6388
6389 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6390 {
6391         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6392         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6393
6394         plane->disable_plane(plane, crtc_state);
6395 }
6396
/*
 * Full modeset enable sequence for ILK/SNB/IVB (PCH split) pipes.
 * The order of the steps below (DPLL prep, timings, pipeconf, encoder
 * pre-enable, FDI PLL, pfit, LUT, watermarks, pipe, PCH, vblank,
 * encoder enable) is significant; do not reorder without checking the
 * platform modeset sequence.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->uapi.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /* Enabling an already-active crtc means state tracking went wrong. */
        if (WARN_ON(intel_crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (pipe_config->has_pch_encoder)
                intel_prepare_shared_dpll(pipe_config);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        intel_set_pipe_timings(pipe_config);
        intel_set_pipe_src_size(pipe_config);

        /* FDI link M/N values go into the CPU transcoder for PCH outputs. */
        if (pipe_config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        ironlake_set_pipeconf(pipe_config);

        /* Mark active before invoking hooks that may inspect crtc state. */
        intel_crtc->active = true;

        intel_encoders_pre_enable(intel_crtc, pipe_config, state);

        if (pipe_config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ironlake_fdi_pll_enable(pipe_config);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(pipe_config);

        /* Program watermarks before the pipe starts fetching data. */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);
        intel_enable_pipe(pipe_config);

        if (pipe_config->has_pch_encoder)
                ironlake_pch_enable(state, pipe_config);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(intel_crtc, pipe_config, state);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev, intel_crtc->pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (pipe_config->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
        /* Re-arm the underrun reporting suppressed at the top. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6491
6492 /* IPS only exists on ULT machines and is tied to pipe A. */
6493 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6494 {
6495         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6496 }
6497
6498 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6499                                             enum pipe pipe, bool apply)
6500 {
6501         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6502         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6503
6504         if (apply)
6505                 val |= mask;
6506         else
6507                 val &= ~mask;
6508
6509         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6510 }
6511
6512 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6513 {
6514         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6515         enum pipe pipe = crtc->pipe;
6516         u32 val;
6517
6518         val = MBUS_DBOX_A_CREDIT(2);
6519
6520         if (INTEL_GEN(dev_priv) >= 12) {
6521                 val |= MBUS_DBOX_BW_CREDIT(2);
6522                 val |= MBUS_DBOX_B_CREDIT(12);
6523         } else {
6524                 val |= MBUS_DBOX_BW_CREDIT(1);
6525                 val |= MBUS_DBOX_B_CREDIT(8);
6526         }
6527
6528         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6529 }
6530
/*
 * Full modeset enable sequence for HSW+ (DDI) pipes, including gen9+
 * and gen11/gen12 extras (port sync, MBus, pipe chicken bits). The step
 * ordering follows the platform modeset sequence; do not reorder
 * without checking it. DSI transcoders skip the steps handled by the
 * DSI-specific code (timings, pipeconf, transcoder func, pipe enable).
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                                struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->uapi.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        bool psl_clkgate_wa;

        /* Enabling an already-active crtc means state tracking went wrong. */
        if (WARN_ON(intel_crtc->active))
                return;

        intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);

        if (pipe_config->shared_dpll)
                intel_enable_shared_dpll(pipe_config);

        intel_encoders_pre_enable(intel_crtc, pipe_config, state);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_enable_trans_port_sync(pipe_config);

        intel_set_pipe_src_size(pipe_config);

        /* Pixel multiplier does not apply to eDP or DSI transcoders. */
        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder)) {
                I915_WRITE(PIPE_MULT(cpu_transcoder),
                           pipe_config->pixel_multiplier - 1);
        }

        /* FDI link M/N values go into the CPU transcoder for PCH outputs. */
        if (pipe_config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        if (!transcoder_is_dsi(cpu_transcoder))
                haswell_set_pipeconf(pipe_config);

        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipemisc(pipe_config);

        /* Mark active before invoking hooks that may inspect crtc state. */
        intel_crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                         pipe_config->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (INTEL_GEN(dev_priv) >= 9)
                skylake_pfit_enable(pipe_config);
        else
                ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (INTEL_GEN(dev_priv) < 9)
                intel_disable_primary_plane(pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(intel_crtc);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(pipe_config);

        /* Program watermarks before the pipe starts fetching data. */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_pipe_mbus_enable(intel_crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(pipe_config);

        if (pipe_config->has_pch_encoder)
                lpt_pch_enable(state, pipe_config);

        if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(pipe_config, true);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(intel_crtc, pipe_config, state);

        /* Undo Display WA #1180 once the pipe has produced a frame. */
        if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
        }
}
6642
/* Disable the PCH panel fitter for the pipe of @old_crtc_state. */
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
        if (old_crtc_state->pch_pfit.enabled) {
                /* Clear control first, then the window position/size. */
                I915_WRITE(PF_CTL(pipe), 0);
                I915_WRITE(PF_WIN_POS(pipe), 0);
                I915_WRITE(PF_WIN_SZ(pipe), 0);
        }
}
6657
/*
 * Full modeset disable sequence for ILK/SNB/IVB (PCH split) pipes:
 * encoders, vblank, pipe, pfit, FDI, then the PCH transcoder and PLLs.
 * The ordering is significant; do not reorder without checking the
 * platform modeset sequence.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                  struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(intel_crtc, old_crtc_state, state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(old_crtc_state);

        ironlake_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ironlake_fdi_disable(crtc);

        intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

        if (old_crtc_state->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);

                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = I915_READ(reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        I915_WRITE(reg, temp);

                        /* disable DPLL_SEL */
                        temp = I915_READ(PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }

                ironlake_fdi_pll_disable(intel_crtc);
        }

        /* Re-arm the underrun reporting suppressed at the top. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6716
/*
 * Full modeset disable sequence for HSW+ (DDI) pipes: encoders, vblank,
 * pipe, MST payload, port sync, transcoder function, DSC, scalers/pfit,
 * then the post-disable/post-pll-disable encoder hooks. DSI transcoders
 * skip the pipe/transcoder steps handled by DSI-specific code. Ordering
 * is significant; do not reorder without checking the modeset sequence.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

        intel_encoders_disable(intel_crtc, old_crtc_state, state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(old_crtc_state);

        if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_disable_transcoder_port_sync(old_crtc_state);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(old_crtc_state);

        intel_dsc_disable(old_crtc_state);

        /* gen9+ uses the unified scalers, older parts the PCH pfit. */
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
                ironlake_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

        intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
}
6754
/*
 * Program the GMCH panel fitter from @crtc_state. Must be called while
 * the pipe is disabled (asserted below).
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* Nothing to do when the state doesn't use the fitter. */
        if (!crtc_state->gmch_pfit.control)
                return;

        /*
         * The panel fitter should only be adjusted whilst the pipe is disabled,
         * according to register description and PRM.
         */
        WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* Ratios must be written before the control register enables them. */
        I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
        I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
6777
6778 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6779 {
6780         if (phy == PHY_NONE)
6781                 return false;
6782
6783         if (IS_ELKHARTLAKE(dev_priv))
6784                 return phy <= PHY_C;
6785
6786         if (INTEL_GEN(dev_priv) >= 11)
6787                 return phy <= PHY_B;
6788
6789         return false;
6790 }
6791
6792 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6793 {
6794         if (INTEL_GEN(dev_priv) >= 12)
6795                 return phy >= PHY_D && phy <= PHY_I;
6796
6797         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6798                 return phy >= PHY_C && phy <= PHY_F;
6799
6800         return false;
6801 }
6802
6803 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6804 {
6805         if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6806                 return PHY_A;
6807
6808         return (enum phy)port;
6809 }
6810
6811 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6812 {
6813         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6814                 return PORT_TC_NONE;
6815
6816         if (INTEL_GEN(dev_priv) >= 12)
6817                 return port - PORT_D;
6818
6819         return port - PORT_C;
6820 }
6821
/*
 * Map a DDI port to the power domain covering its lanes. Kept as an
 * explicit switch so new ports trigger MISSING_CASE instead of silently
 * mapping to a wrong domain.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
        switch (port) {
        case PORT_A:
                return POWER_DOMAIN_PORT_DDI_A_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_LANES;
        case PORT_C:
                return POWER_DOMAIN_PORT_DDI_C_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_LANES;
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_LANES;
        case PORT_F:
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        case PORT_G:
                return POWER_DOMAIN_PORT_DDI_G_LANES;
        default:
                /* Unknown port: warn and use the catch-all domain. */
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
}
6844
/*
 * Map a digital port's AUX channel to its power domain. Type-C ports in
 * Thunderbolt-alt mode use the dedicated *_TBT AUX domains; everything
 * else uses the regular per-channel AUX domains.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

        if (intel_phy_is_tc(dev_priv, phy) &&
            dig_port->tc_mode == TC_PORT_TBT_ALT) {
                switch (dig_port->aux_ch) {
                case AUX_CH_C:
                        return POWER_DOMAIN_AUX_C_TBT;
                case AUX_CH_D:
                        return POWER_DOMAIN_AUX_D_TBT;
                case AUX_CH_E:
                        return POWER_DOMAIN_AUX_E_TBT;
                case AUX_CH_F:
                        return POWER_DOMAIN_AUX_F_TBT;
                case AUX_CH_G:
                        return POWER_DOMAIN_AUX_G_TBT;
                default:
                        /* Unknown TBT AUX channel: warn, fall back to C. */
                        MISSING_CASE(dig_port->aux_ch);
                        return POWER_DOMAIN_AUX_C_TBT;
                }
        }

        switch (dig_port->aux_ch) {
        case AUX_CH_A:
                return POWER_DOMAIN_AUX_A;
        case AUX_CH_B:
                return POWER_DOMAIN_AUX_B;
        case AUX_CH_C:
                return POWER_DOMAIN_AUX_C;
        case AUX_CH_D:
                return POWER_DOMAIN_AUX_D;
        case AUX_CH_E:
                return POWER_DOMAIN_AUX_E;
        case AUX_CH_F:
                return POWER_DOMAIN_AUX_F;
        case AUX_CH_G:
                return POWER_DOMAIN_AUX_G;
        default:
                /* Unknown AUX channel: warn, fall back to A. */
                MISSING_CASE(dig_port->aux_ch);
                return POWER_DOMAIN_AUX_A;
        }
}
6890
6891 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6892 {
6893         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6894         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6895         struct drm_encoder *encoder;
6896         enum pipe pipe = crtc->pipe;
6897         u64 mask;
6898         enum transcoder transcoder = crtc_state->cpu_transcoder;
6899
6900         if (!crtc_state->hw.active)
6901                 return 0;
6902
6903         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6904         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6905         if (crtc_state->pch_pfit.enabled ||
6906             crtc_state->pch_pfit.force_thru)
6907                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6908
6909         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
6910                                   crtc_state->uapi.encoder_mask) {
6911                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6912
6913                 mask |= BIT_ULL(intel_encoder->power_domain);
6914         }
6915
6916         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6917                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6918
6919         if (crtc_state->shared_dpll)
6920                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
6921
6922         return mask;
6923 }
6924
6925 static u64
6926 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6927 {
6928         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6929         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6930         enum intel_display_power_domain domain;
6931         u64 domains, new_domains, old_domains;
6932
6933         old_domains = crtc->enabled_power_domains;
6934         crtc->enabled_power_domains = new_domains =
6935                 get_crtc_power_domains(crtc_state);
6936
6937         domains = new_domains & ~old_domains;
6938
6939         for_each_power_domain(domain, domains)
6940                 intel_display_power_get(dev_priv, domain);
6941
6942         return old_domains & ~new_domains;
6943 }
6944
6945 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6946                                       u64 domains)
6947 {
6948         enum intel_display_power_domain domain;
6949
6950         for_each_power_domain(domain, domains)
6951                 intel_display_power_put_unchecked(dev_priv, domain);
6952 }
6953
/*
 * Modeset enable sequence for a pipe on VLV/CHV: M/N and transcoder
 * timings first, then PIPECONF, PLL bring-up, encoder hooks, pipe-level
 * fixed-function blocks (pfit, LUTs), watermarks, and finally the pipe.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Refuse to run the enable sequence twice for the same crtc. */
	if (WARN_ON(intel_crtc->active))
		return;

	/* Program data/link M/N before the transcoder timings. */
	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/*
	 * NOTE(review): CHV pipe B gets legacy blending and a zeroed canvas
	 * colour here, before PIPECONF is written — presumably required by
	 * the CHV programming sequence; confirm against bspec.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Encoder pre_pll hooks must run before the PLL is powered up. */
	intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, pipe_config);
		chv_enable_pll(intel_crtc, pipe_config);
	} else {
		vlv_prepare_pll(intel_crtc, pipe_config);
		vlv_enable_pll(intel_crtc, pipe_config);
	}

	intel_encoders_pre_enable(intel_crtc, pipe_config, state);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	/* Watermarks must be valid before the pipe starts fetching. */
	dev_priv->display.initial_watermarks(state, pipe_config);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(intel_crtc, pipe_config, state);
}
7010
7011 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7012 {
7013         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7014         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7015
7016         I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
7017         I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
7018 }
7019
/*
 * Modeset enable sequence for pre-VLV GMCH platforms: PLL dividers, M/N
 * and timings first, then PIPECONF, PLL, pfit/LUTs, watermarks, and
 * finally the pipe itself.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Refuse to run the enable sequence twice for the same crtc. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	/* Program data/link M/N before the transcoder timings. */
	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* FIFO underrun reporting is not enabled on gen2. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(intel_crtc, pipe_config, state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	/* Not every platform provides an initial_watermarks() hook. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(intel_crtc, pipe_config, state);
}
7070
7071 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7072 {
7073         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7074         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7075
7076         if (!old_crtc_state->gmch_pfit.control)
7077                 return;
7078
7079         assert_pipe_disabled(dev_priv, crtc->pipe);
7080
7081         DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
7082                       I915_READ(PFIT_CONTROL));
7083         I915_WRITE(PFIT_CONTROL, 0);
7084 }
7085
/*
 * Modeset disable sequence for GMCH platforms: encoders off, vblanks
 * off, pipe off, pfit off, then the PLL — mirroring the enable order.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(intel_crtc, old_crtc_state, state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

	/*
	 * NOTE(review): the pipe PLL is left alone for DSI outputs —
	 * presumably the DSI encoder manages its own PLL; confirm.
	 */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);

	/* Underrun reporting was never enabled on gen2 (see enable path). */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7134
/*
 * Force a crtc off outside of a full atomic commit.  Disables the planes
 * and the pipe via the normal hooks, then scrubs the software state
 * (crtc state, encoder links, power domains, cdclk/bandwidth tracking)
 * so that it agrees with the now-disabled hardware.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off every plane that is currently visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/* A throwaway atomic state is needed to drive the disable hook. */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Clear the software crtc state to match the disabled hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach all encoders that were routed to this crtc. */
	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Drop every power domain reference this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;

	/* This pipe no longer contributes to the bandwidth state. */
	bw_state->data_rate[intel_crtc->pipe] = 0;
	bw_state->num_active_planes[intel_crtc->pipe] = 0;
}
7211
/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
7216 int intel_display_suspend(struct drm_device *dev)
7217 {
7218         struct drm_i915_private *dev_priv = to_i915(dev);
7219         struct drm_atomic_state *state;
7220         int ret;
7221
7222         state = drm_atomic_helper_suspend(dev);
7223         ret = PTR_ERR_OR_ZERO(state);
7224         if (ret)
7225                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7226         else
7227                 dev_priv->modeset_restore_state = state;
7228         return ret;
7229 }
7230
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Release the drm core bookkeeping, then the embedding structure. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
7238
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
7241 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
7242                                          struct drm_connector_state *conn_state)
7243 {
7244         struct intel_connector *connector = to_intel_connector(conn_state->connector);
7245
7246         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
7247                       connector->base.base.id,
7248                       connector->base.name);
7249
7250         if (connector->get_hw_state(connector)) {
7251                 struct intel_encoder *encoder = connector->encoder;
7252
7253                 I915_STATE_WARN(!crtc_state,
7254                          "connector enabled without attached crtc\n");
7255
7256                 if (!crtc_state)
7257                         return;
7258
7259                 I915_STATE_WARN(!crtc_state->hw.active,
7260                                 "connector is active, but attached crtc isn't\n");
7261
7262                 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7263                         return;
7264
7265                 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7266                         "atomic encoder doesn't match attached encoder\n");
7267
7268                 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7269                         "attached encoder crtc differs from connector crtc\n");
7270         } else {
7271                 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
7272                                 "attached crtc is active, but connector isn't\n");
7273                 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7274                         "best encoder set without crtc!\n");
7275         }
7276 }
7277
7278 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7279 {
7280         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7281                 return crtc_state->fdi_lanes;
7282
7283         return 0;
7284 }
7285
/*
 * Validate the FDI lane allocation for @pipe against per-platform limits
 * and against the lanes already claimed by other pipes that share the
 * FDI link.  Returns 0 if the config is acceptable, a negative error
 * code otherwise (including -EDEADLK from state acquisition).
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum accepted here. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW are limited to 2 FDI lanes on any pipe. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B may use more than 2 lanes only if pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C itself is capped at 2 lanes... */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* ...and only usable while pipe B needs at most 2 lanes. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7357
/* Sentinel return value: bpp was reduced, caller must recompute. */
#define RETRY 1
/*
 * Derive the FDI lane count and M/N values for @pipe_config, reducing
 * pipe_bpp (down to a floor of 18 bpp) until the lane constraints are
 * met.  Returns 0 on success, RETRY when the caller must recompute the
 * config with the reduced bpp, or a negative error code.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* -EDEADLK must be propagated for the atomic backoff dance. */
	if (ret == -EDEADLK)
		return ret;

	/* Out of lanes: trade colour depth for bandwidth and try again. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7406
7407 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7408 {
7409         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7410         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7411
7412         /* IPS only exists on ULT machines and is tied to pipe A. */
7413         if (!hsw_crtc_supports_ips(crtc))
7414                 return false;
7415
7416         if (!i915_modparams.enable_ips)
7417                 return false;
7418
7419         if (crtc_state->pipe_bpp > 24)
7420                 return false;
7421
7422         /*
7423          * We compare against max which means we must take
7424          * the increased cdclk requirement into account when
7425          * calculating the new cdclk.
7426          *
7427          * Should measure whether using a lower cdclk w/o IPS
7428          */
7429         if (IS_BROADWELL(dev_priv) &&
7430             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7431                 return false;
7432
7433         return true;
7434 }
7435
7436 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7437 {
7438         struct drm_i915_private *dev_priv =
7439                 to_i915(crtc_state->uapi.crtc->dev);
7440         struct intel_atomic_state *intel_state =
7441                 to_intel_atomic_state(crtc_state->uapi.state);
7442
7443         if (!hsw_crtc_state_ips_capable(crtc_state))
7444                 return false;
7445
7446         /*
7447          * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7448          * enabled and disabled dynamically based on package C states,
7449          * user space can't make reliable use of the CRCs, so let's just
7450          * completely disable it.
7451          */
7452         if (crtc_state->crc_enabled)
7453                 return false;
7454
7455         /* IPS should be fine as long as at least one plane is enabled. */
7456         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7457                 return false;
7458
7459         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7460         if (IS_BROADWELL(dev_priv) &&
7461             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7462                 return false;
7463
7464         return true;
7465 }
7466
7467 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7468 {
7469         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7470
7471         /* GDG double wide on either pipe, otherwise pipe A only */
7472         return INTEL_GEN(dev_priv) < 4 &&
7473                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7474 }
7475
7476 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7477 {
7478         u32 pixel_rate;
7479
7480         pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
7481
7482         /*
7483          * We only use IF-ID interlacing. If we ever use
7484          * PF-ID we'll need to adjust the pixel_rate here.
7485          */
7486
7487         if (pipe_config->pch_pfit.enabled) {
7488                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7489                 u32 pfit_size = pipe_config->pch_pfit.size;
7490
7491                 pipe_w = pipe_config->pipe_src_w;
7492                 pipe_h = pipe_config->pipe_src_h;
7493
7494                 pfit_w = (pfit_size >> 16) & 0xFFFF;
7495                 pfit_h = pfit_size & 0xFFFF;
7496                 if (pipe_w < pfit_w)
7497                         pipe_w = pfit_w;
7498                 if (pipe_h < pfit_h)
7499                         pipe_h = pfit_h;
7500
7501                 if (WARN_ON(!pfit_w || !pfit_h))
7502                         return pixel_rate;
7503
7504                 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7505                                      pfit_w * pfit_h);
7506         }
7507
7508         return pixel_rate;
7509 }
7510
7511 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7512 {
7513         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7514
7515         if (HAS_GMCH(dev_priv))
7516                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7517                 crtc_state->pixel_rate =
7518                         crtc_state->hw.adjusted_mode.crtc_clock;
7519         else
7520                 crtc_state->pixel_rate =
7521                         ilk_pipe_pixel_rate(crtc_state);
7522 }
7523
/*
 * Validate a requested mode against platform constraints and compute the
 * derived crtc state: dotclock limits (including double wide), YCbCr/CTM
 * exclusion, pipe-width parity rules, pixel rate, and the FDI config for
 * PCH encoders.  Returns 0 on success or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4 single-wide pipes cap at 90% of max cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders route through FDI, which imposes its own limits. */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
7597
7598 static void
7599 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7600 {
7601         while (*num > DATA_LINK_M_N_MASK ||
7602                *den > DATA_LINK_M_N_MASK) {
7603                 *num >>= 1;
7604                 *den >>= 1;
7605         }
7606 }
7607
7608 static void compute_m_n(unsigned int m, unsigned int n,
7609                         u32 *ret_m, u32 *ret_n,
7610                         bool constant_n)
7611 {
7612         /*
7613          * Several DP dongles in particular seem to be fussy about
7614          * too large link M/N values. Give N value as 0x8000 that
7615          * should be acceptable by specific devices. 0x8000 is the
7616          * specified fixed N value for asynchronous clock mode,
7617          * which the devices expect also in synchronous clock mode.
7618          */
7619         if (constant_n)
7620                 *ret_n = 0x8000;
7621         else
7622                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7623
7624         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7625         intel_reduce_m_n_ratio(ret_m, ret_n);
7626 }
7627
7628 void
7629 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7630                        int pixel_clock, int link_clock,
7631                        struct intel_link_m_n *m_n,
7632                        bool constant_n, bool fec_enable)
7633 {
7634         u32 data_clock = bits_per_pixel * pixel_clock;
7635
7636         if (fec_enable)
7637                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
7638
7639         m_n->tu = 64;
7640         compute_m_n(data_clock,
7641                     link_clock * nlanes * 8,
7642                     &m_n->gmch_m, &m_n->gmch_n,
7643                     constant_n);
7644
7645         compute_m_n(pixel_clock, link_clock,
7646                     &m_n->link_m, &m_n->link_n,
7647                     constant_n);
7648 }
7649
7650 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
7651 {
7652         /*
7653          * There may be no VBT; and if the BIOS enabled SSC we can
7654          * just keep using it to avoid unnecessary flicker.  Whereas if the
7655          * BIOS isn't using it, don't assume it will work even if the VBT
7656          * indicates as much.
7657          */
7658         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
7659                 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
7660                         DREF_SSC1_ENABLE;
7661
7662                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
7663                         DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
7664                                       enableddisabled(bios_lvds_use_ssc),
7665                                       enableddisabled(dev_priv->vbt.lvds_use_ssc));
7666                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
7667                 }
7668         }
7669 }
7670
7671 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7672 {
7673         if (i915_modparams.panel_use_ssc >= 0)
7674                 return i915_modparams.panel_use_ssc != 0;
7675         return dev_priv->vbt.lvds_use_ssc
7676                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7677 }
7678
7679 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7680 {
7681         return (1 << dpll->n) << 16 | dpll->m2;
7682 }
7683
7684 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7685 {
7686         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7687 }
7688
7689 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7690                                      struct intel_crtc_state *crtc_state,
7691                                      struct dpll *reduced_clock)
7692 {
7693         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7694         u32 fp, fp2 = 0;
7695
7696         if (IS_PINEVIEW(dev_priv)) {
7697                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7698                 if (reduced_clock)
7699                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7700         } else {
7701                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7702                 if (reduced_clock)
7703                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7704         }
7705
7706         crtc_state->dpll_hw_state.fp0 = fp;
7707
7708         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7709             reduced_clock) {
7710                 crtc_state->dpll_hw_state.fp1 = fp2;
7711         } else {
7712                 crtc_state->dpll_hw_state.fp1 = fp;
7713         }
7714 }
7715
/*
 * Recalibrate the PLL B opamp via DPIO. The sequence and the magic
 * values come from the VLV DPIO programming notes; the exact ordering
 * of reads/writes matters.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* NOTE(review): magic 0x8c value per DPIO notes — meaning not
	 * documented here, TODO confirm against the PHY spec. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the forced low byte again after calibration */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* Restore the top byte to its normal (0xb0) value */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7744
7745 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7746                                          const struct intel_link_m_n *m_n)
7747 {
7748         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7749         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7750         enum pipe pipe = crtc->pipe;
7751
7752         I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7753         I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7754         I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7755         I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7756 }
7757
7758 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7759                                  enum transcoder transcoder)
7760 {
7761         if (IS_HASWELL(dev_priv))
7762                 return transcoder == TRANSCODER_EDP;
7763
7764         /*
7765          * Strictly speaking some registers are available before
7766          * gen7, but we only support DRRS on gen7+
7767          */
7768         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7769 }
7770
/*
 * Program the data/link M/N ratio registers: the per-transcoder set on
 * gen5+, or the legacy per-pipe GMCH set on gen4 and earlier. The
 * optional @m2_n2 values (for DRRS) are written only when DRRS is
 * enabled and the transcoder actually has M2/N2 registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		/* DATA_M1 also carries the TU size in its high bits */
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* gen4- use the per-pipe G4X register layout */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7804
7805 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7806 {
7807         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7808
7809         if (m_n == M1_N1) {
7810                 dp_m_n = &crtc_state->dp_m_n;
7811                 dp_m2_n2 = &crtc_state->dp_m2_n2;
7812         } else if (m_n == M2_N2) {
7813
7814                 /*
7815                  * M2_N2 registers are not supported. Hence m2_n2 divider value
7816                  * needs to be programmed into M1_N1.
7817                  */
7818                 dp_m_n = &crtc_state->dp_m2_n2;
7819         } else {
7820                 DRM_ERROR("Unsupported divider value\n");
7821                 return;
7822         }
7823
7824         if (crtc_state->has_pch_encoder)
7825                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7826         else
7827                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7828 }
7829
7830 static void vlv_compute_dpll(struct intel_crtc *crtc,
7831                              struct intel_crtc_state *pipe_config)
7832 {
7833         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7834                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7835         if (crtc->pipe != PIPE_A)
7836                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7837
7838         /* DPLL not used with DSI, but still need the rest set up */
7839         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7840                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7841                         DPLL_EXT_BUFFER_ENABLE_VLV;
7842
7843         pipe_config->dpll_hw_state.dpll_md =
7844                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7845 }
7846
7847 static void chv_compute_dpll(struct intel_crtc *crtc,
7848                              struct intel_crtc_state *pipe_config)
7849 {
7850         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7851                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7852         if (crtc->pipe != PIPE_A)
7853                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7854
7855         /* DPLL not used with DSI, but still need the rest set up */
7856         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7857                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7858
7859         pipe_config->dpll_hw_state.dpll_md =
7860                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7861 }
7862
/*
 * Program the VLV DPIO PHY with the PLL dividers and analog tuning
 * values from @pipe_config. The DPLL itself is enabled separately; for
 * DSI (VCO enable bit clear) only the refclk is set up here.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write the dividers a second time with calibration enabled */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock: preserve bits 8-15, set magic upper bits
	 * (values from DPIO notes — TODO confirm meaning) */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
7962
/*
 * Program the CHV DPIO PHY with the PLL dividers, loop filter and lock
 * detect settings from @pipe_config, then kick off an AFC recal. The
 * DPLL itself is enabled separately; for DSI (VCO enable bit clear)
 * only the refclk/SSC bits are written.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 carries a 22-bit fractional part; split integer/fraction */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold;
	 * coarse selection used when there is no fractional part */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter - coefficients selected per VCO frequency band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count (0 in the unsupported-VCO case above) */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8067
8068 /**
8069  * vlv_force_pll_on - forcibly enable just the PLL
8070  * @dev_priv: i915 private structure
8071  * @pipe: pipe PLL to enable
8072  * @dpll: PLL configuration
8073  *
8074  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8075  * in cases where we need the PLL enabled even when @pipe is not going to
8076  * be enabled.
8077  */
8078 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8079                      const struct dpll *dpll)
8080 {
8081         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8082         struct intel_crtc_state *pipe_config;
8083
8084         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8085         if (!pipe_config)
8086                 return -ENOMEM;
8087
8088         pipe_config->uapi.crtc = &crtc->base;
8089         pipe_config->pixel_multiplier = 1;
8090         pipe_config->dpll = *dpll;
8091
8092         if (IS_CHERRYVIEW(dev_priv)) {
8093                 chv_compute_dpll(crtc, pipe_config);
8094                 chv_prepare_pll(crtc, pipe_config);
8095                 chv_enable_pll(crtc, pipe_config);
8096         } else {
8097                 vlv_compute_dpll(crtc, pipe_config);
8098                 vlv_prepare_pll(crtc, pipe_config);
8099                 vlv_enable_pll(crtc, pipe_config);
8100         }
8101
8102         kfree(pipe_config);
8103
8104         return 0;
8105 }
8106
8107 /**
8108  * vlv_force_pll_off - forcibly disable just the PLL
8109  * @dev_priv: i915 private structure
8110  * @pipe: pipe PLL to disable
8111  *
8112  * Disable the PLL for @pipe. To be used in cases where we need
8113  * the PLL enabled even when @pipe is not going to be enabled.
8114  */
8115 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8116 {
8117         if (IS_CHERRYVIEW(dev_priv))
8118                 chv_disable_pll(dev_priv, pipe);
8119         else
8120                 vlv_disable_pll(dev_priv, pipe);
8121 }
8122
8123 static void i9xx_compute_dpll(struct intel_crtc *crtc,
8124                               struct intel_crtc_state *crtc_state,
8125                               struct dpll *reduced_clock)
8126 {
8127         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8128         u32 dpll;
8129         struct dpll *clock = &crtc_state->dpll;
8130
8131         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8132
8133         dpll = DPLL_VGA_MODE_DIS;
8134
8135         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8136                 dpll |= DPLLB_MODE_LVDS;
8137         else
8138                 dpll |= DPLLB_MODE_DAC_SERIAL;
8139
8140         if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8141             IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8142                 dpll |= (crtc_state->pixel_multiplier - 1)
8143                         << SDVO_MULTIPLIER_SHIFT_HIRES;
8144         }
8145
8146         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8147             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8148                 dpll |= DPLL_SDVO_HIGH_SPEED;
8149
8150         if (intel_crtc_has_dp_encoder(crtc_state))
8151                 dpll |= DPLL_SDVO_HIGH_SPEED;
8152
8153         /* compute bitmask from p1 value */
8154         if (IS_PINEVIEW(dev_priv))
8155                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
8156         else {
8157                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8158                 if (IS_G4X(dev_priv) && reduced_clock)
8159                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8160         }
8161         switch (clock->p2) {
8162         case 5:
8163                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8164                 break;
8165         case 7:
8166                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8167                 break;
8168         case 10:
8169                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8170                 break;
8171         case 14:
8172                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8173                 break;
8174         }
8175         if (INTEL_GEN(dev_priv) >= 4)
8176                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
8177
8178         if (crtc_state->sdvo_tv_clock)
8179                 dpll |= PLL_REF_INPUT_TVCLKINBC;
8180         else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8181                  intel_panel_use_ssc(dev_priv))
8182                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8183         else
8184                 dpll |= PLL_REF_INPUT_DREFCLK;
8185
8186         dpll |= DPLL_VCO_ENABLE;
8187         crtc_state->dpll_hw_state.dpll = dpll;
8188
8189         if (INTEL_GEN(dev_priv) >= 4) {
8190                 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
8191                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8192                 crtc_state->dpll_hw_state.dpll_md = dpll_md;
8193         }
8194 }
8195
8196 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8197                               struct intel_crtc_state *crtc_state,
8198                               struct dpll *reduced_clock)
8199 {
8200         struct drm_device *dev = crtc->base.dev;
8201         struct drm_i915_private *dev_priv = to_i915(dev);
8202         u32 dpll;
8203         struct dpll *clock = &crtc_state->dpll;
8204
8205         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8206
8207         dpll = DPLL_VGA_MODE_DIS;
8208
8209         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8210                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8211         } else {
8212                 if (clock->p1 == 2)
8213                         dpll |= PLL_P1_DIVIDE_BY_TWO;
8214                 else
8215                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8216                 if (clock->p2 == 4)
8217                         dpll |= PLL_P2_DIVIDE_BY_4;
8218         }
8219
8220         /*
8221          * Bspec:
8222          * "[Almador Errata}: For the correct operation of the muxed DVO pins
8223          *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
8224          *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
8225          *  Enable) must be set to “1” in both the DPLL A Control Register
8226          *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
8227          *
8228          * For simplicity We simply keep both bits always enabled in
8229          * both DPLLS. The spec says we should disable the DVO 2X clock
8230          * when not needed, but this seems to work fine in practice.
8231          */
8232         if (IS_I830(dev_priv) ||
8233             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
8234                 dpll |= DPLL_DVO_2X_MODE;
8235
8236         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8237             intel_panel_use_ssc(dev_priv))
8238                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8239         else
8240                 dpll |= PLL_REF_INPUT_DREFCLK;
8241
8242         dpll |= DPLL_VCO_ENABLE;
8243         crtc_state->dpll_hw_state.dpll = dpll;
8244 }
8245
/*
 * Program the transcoder H/V timing registers from the adjusted mode,
 * applying the interlace halfline adjustments and vsync shift.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for
	 * otherwise the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT exists on gen4+ only */
	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* All timing registers encode (active - 1) | ((total - 1) << 16) */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
8307
8308 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8309 {
8310         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8311         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8312         enum pipe pipe = crtc->pipe;
8313
8314         /* pipesrc controls the size that is scaled from, which should
8315          * always be the user's requested size.
8316          */
8317         I915_WRITE(PIPESRC(pipe),
8318                    ((crtc_state->pipe_src_w - 1) << 16) |
8319                    (crtc_state->pipe_src_h - 1));
8320 }
8321
8322 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8323 {
8324         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8325         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8326
8327         if (IS_GEN(dev_priv, 2))
8328                 return false;
8329
8330         if (INTEL_GEN(dev_priv) >= 9 ||
8331             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8332                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8333         else
8334                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8335 }
8336
8337 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8338                                    struct intel_crtc_state *pipe_config)
8339 {
8340         struct drm_device *dev = crtc->base.dev;
8341         struct drm_i915_private *dev_priv = to_i915(dev);
8342         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8343         u32 tmp;
8344
8345         tmp = I915_READ(HTOTAL(cpu_transcoder));
8346         pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8347         pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8348
8349         if (!transcoder_is_dsi(cpu_transcoder)) {
8350                 tmp = I915_READ(HBLANK(cpu_transcoder));
8351                 pipe_config->hw.adjusted_mode.crtc_hblank_start =
8352                                                         (tmp & 0xffff) + 1;
8353                 pipe_config->hw.adjusted_mode.crtc_hblank_end =
8354                                                 ((tmp >> 16) & 0xffff) + 1;
8355         }
8356         tmp = I915_READ(HSYNC(cpu_transcoder));
8357         pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8358         pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8359
8360         tmp = I915_READ(VTOTAL(cpu_transcoder));
8361         pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8362         pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8363
8364         if (!transcoder_is_dsi(cpu_transcoder)) {
8365                 tmp = I915_READ(VBLANK(cpu_transcoder));
8366                 pipe_config->hw.adjusted_mode.crtc_vblank_start =
8367                                                         (tmp & 0xffff) + 1;
8368                 pipe_config->hw.adjusted_mode.crtc_vblank_end =
8369                                                 ((tmp >> 16) & 0xffff) + 1;
8370         }
8371         tmp = I915_READ(VSYNC(cpu_transcoder));
8372         pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8373         pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8374
8375         if (intel_pipe_is_interlaced(pipe_config)) {
8376                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8377                 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
8378                 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
8379         }
8380 }
8381
8382 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8383                                     struct intel_crtc_state *pipe_config)
8384 {
8385         struct drm_device *dev = crtc->base.dev;
8386         struct drm_i915_private *dev_priv = to_i915(dev);
8387         u32 tmp;
8388
8389         tmp = I915_READ(PIPESRC(crtc->pipe));
8390         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8391         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8392
8393         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8394         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8395 }
8396
8397 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8398                                  struct intel_crtc_state *pipe_config)
8399 {
8400         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8401         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8402         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8403         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8404
8405         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8406         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8407         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8408         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8409
8410         mode->flags = pipe_config->hw.adjusted_mode.flags;
8411         mode->type = DRM_MODE_TYPE_DRIVER;
8412
8413         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8414
8415         mode->hsync = drm_mode_hsync(mode);
8416         mode->vrefresh = drm_mode_vrefresh(mode);
8417         drm_mode_set_name(mode);
8418 }
8419
8420 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8421 {
8422         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8423         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8424         u32 pipeconf;
8425
8426         pipeconf = 0;
8427
8428         /* we keep both pipes enabled on 830 */
8429         if (IS_I830(dev_priv))
8430                 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8431
8432         if (crtc_state->double_wide)
8433                 pipeconf |= PIPECONF_DOUBLE_WIDE;
8434
8435         /* only g4x and later have fancy bpc/dither controls */
8436         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8437             IS_CHERRYVIEW(dev_priv)) {
8438                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8439                 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8440                         pipeconf |= PIPECONF_DITHER_EN |
8441                                     PIPECONF_DITHER_TYPE_SP;
8442
8443                 switch (crtc_state->pipe_bpp) {
8444                 case 18:
8445                         pipeconf |= PIPECONF_6BPC;
8446                         break;
8447                 case 24:
8448                         pipeconf |= PIPECONF_8BPC;
8449                         break;
8450                 case 30:
8451                         pipeconf |= PIPECONF_10BPC;
8452                         break;
8453                 default:
8454                         /* Case prevented by intel_choose_pipe_bpp_dither. */
8455                         BUG();
8456                 }
8457         }
8458
8459         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8460                 if (INTEL_GEN(dev_priv) < 4 ||
8461                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8462                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8463                 else
8464                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8465         } else {
8466                 pipeconf |= PIPECONF_PROGRESSIVE;
8467         }
8468
8469         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8470              crtc_state->limited_color_range)
8471                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8472
8473         pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8474
8475         I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8476         POSTING_READ(PIPECONF(crtc->pipe));
8477 }
8478
8479 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8480                                    struct intel_crtc_state *crtc_state)
8481 {
8482         struct drm_device *dev = crtc->base.dev;
8483         struct drm_i915_private *dev_priv = to_i915(dev);
8484         const struct intel_limit *limit;
8485         int refclk = 48000;
8486
8487         memset(&crtc_state->dpll_hw_state, 0,
8488                sizeof(crtc_state->dpll_hw_state));
8489
8490         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8491                 if (intel_panel_use_ssc(dev_priv)) {
8492                         refclk = dev_priv->vbt.lvds_ssc_freq;
8493                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8494                 }
8495
8496                 limit = &intel_limits_i8xx_lvds;
8497         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8498                 limit = &intel_limits_i8xx_dvo;
8499         } else {
8500                 limit = &intel_limits_i8xx_dac;
8501         }
8502
8503         if (!crtc_state->clock_set &&
8504             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8505                                  refclk, NULL, &crtc_state->dpll)) {
8506                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8507                 return -EINVAL;
8508         }
8509
8510         i8xx_compute_dpll(crtc, crtc_state, NULL);
8511
8512         return 0;
8513 }
8514
8515 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8516                                   struct intel_crtc_state *crtc_state)
8517 {
8518         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8519         const struct intel_limit *limit;
8520         int refclk = 96000;
8521
8522         memset(&crtc_state->dpll_hw_state, 0,
8523                sizeof(crtc_state->dpll_hw_state));
8524
8525         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8526                 if (intel_panel_use_ssc(dev_priv)) {
8527                         refclk = dev_priv->vbt.lvds_ssc_freq;
8528                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8529                 }
8530
8531                 if (intel_is_dual_link_lvds(dev_priv))
8532                         limit = &intel_limits_g4x_dual_channel_lvds;
8533                 else
8534                         limit = &intel_limits_g4x_single_channel_lvds;
8535         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8536                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8537                 limit = &intel_limits_g4x_hdmi;
8538         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8539                 limit = &intel_limits_g4x_sdvo;
8540         } else {
8541                 /* The option is for other outputs */
8542                 limit = &intel_limits_i9xx_sdvo;
8543         }
8544
8545         if (!crtc_state->clock_set &&
8546             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8547                                 refclk, NULL, &crtc_state->dpll)) {
8548                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8549                 return -EINVAL;
8550         }
8551
8552         i9xx_compute_dpll(crtc, crtc_state, NULL);
8553
8554         return 0;
8555 }
8556
8557 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8558                                   struct intel_crtc_state *crtc_state)
8559 {
8560         struct drm_device *dev = crtc->base.dev;
8561         struct drm_i915_private *dev_priv = to_i915(dev);
8562         const struct intel_limit *limit;
8563         int refclk = 96000;
8564
8565         memset(&crtc_state->dpll_hw_state, 0,
8566                sizeof(crtc_state->dpll_hw_state));
8567
8568         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8569                 if (intel_panel_use_ssc(dev_priv)) {
8570                         refclk = dev_priv->vbt.lvds_ssc_freq;
8571                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8572                 }
8573
8574                 limit = &intel_limits_pineview_lvds;
8575         } else {
8576                 limit = &intel_limits_pineview_sdvo;
8577         }
8578
8579         if (!crtc_state->clock_set &&
8580             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8581                                 refclk, NULL, &crtc_state->dpll)) {
8582                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8583                 return -EINVAL;
8584         }
8585
8586         i9xx_compute_dpll(crtc, crtc_state, NULL);
8587
8588         return 0;
8589 }
8590
8591 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8592                                    struct intel_crtc_state *crtc_state)
8593 {
8594         struct drm_device *dev = crtc->base.dev;
8595         struct drm_i915_private *dev_priv = to_i915(dev);
8596         const struct intel_limit *limit;
8597         int refclk = 96000;
8598
8599         memset(&crtc_state->dpll_hw_state, 0,
8600                sizeof(crtc_state->dpll_hw_state));
8601
8602         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8603                 if (intel_panel_use_ssc(dev_priv)) {
8604                         refclk = dev_priv->vbt.lvds_ssc_freq;
8605                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8606                 }
8607
8608                 limit = &intel_limits_i9xx_lvds;
8609         } else {
8610                 limit = &intel_limits_i9xx_sdvo;
8611         }
8612
8613         if (!crtc_state->clock_set &&
8614             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8615                                  refclk, NULL, &crtc_state->dpll)) {
8616                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8617                 return -EINVAL;
8618         }
8619
8620         i9xx_compute_dpll(crtc, crtc_state, NULL);
8621
8622         return 0;
8623 }
8624
8625 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8626                                   struct intel_crtc_state *crtc_state)
8627 {
8628         int refclk = 100000;
8629         const struct intel_limit *limit = &intel_limits_chv;
8630
8631         memset(&crtc_state->dpll_hw_state, 0,
8632                sizeof(crtc_state->dpll_hw_state));
8633
8634         if (!crtc_state->clock_set &&
8635             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8636                                 refclk, NULL, &crtc_state->dpll)) {
8637                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8638                 return -EINVAL;
8639         }
8640
8641         chv_compute_dpll(crtc, crtc_state);
8642
8643         return 0;
8644 }
8645
8646 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8647                                   struct intel_crtc_state *crtc_state)
8648 {
8649         int refclk = 100000;
8650         const struct intel_limit *limit = &intel_limits_vlv;
8651
8652         memset(&crtc_state->dpll_hw_state, 0,
8653                sizeof(crtc_state->dpll_hw_state));
8654
8655         if (!crtc_state->clock_set &&
8656             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8657                                 refclk, NULL, &crtc_state->dpll)) {
8658                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8659                 return -EINVAL;
8660         }
8661
8662         vlv_compute_dpll(crtc, crtc_state);
8663
8664         return 0;
8665 }
8666
8667 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8668 {
8669         if (IS_I830(dev_priv))
8670                 return false;
8671
8672         return INTEL_GEN(dev_priv) >= 4 ||
8673                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8674 }
8675
8676 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8677                                  struct intel_crtc_state *pipe_config)
8678 {
8679         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8680         u32 tmp;
8681
8682         if (!i9xx_has_pfit(dev_priv))
8683                 return;
8684
8685         tmp = I915_READ(PFIT_CONTROL);
8686         if (!(tmp & PFIT_ENABLE))
8687                 return;
8688
8689         /* Check whether the pfit is attached to our pipe. */
8690         if (INTEL_GEN(dev_priv) < 4) {
8691                 if (crtc->pipe != PIPE_B)
8692                         return;
8693         } else {
8694                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8695                         return;
8696         }
8697
8698         pipe_config->gmch_pfit.control = tmp;
8699         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8700 }
8701
8702 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8703                                struct intel_crtc_state *pipe_config)
8704 {
8705         struct drm_device *dev = crtc->base.dev;
8706         struct drm_i915_private *dev_priv = to_i915(dev);
8707         enum pipe pipe = crtc->pipe;
8708         struct dpll clock;
8709         u32 mdiv;
8710         int refclk = 100000;
8711
8712         /* In case of DSI, DPLL will not be used */
8713         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8714                 return;
8715
8716         vlv_dpio_get(dev_priv);
8717         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8718         vlv_dpio_put(dev_priv);
8719
8720         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8721         clock.m2 = mdiv & DPIO_M2DIV_MASK;
8722         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8723         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8724         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8725
8726         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8727 }
8728
/*
 * Read back the primary plane's programmed state (format, tiling,
 * rotation, base address, stride, size) into @plane_config so the
 * firmware/BIOS framebuffer can be taken over.  Allocates an
 * intel_framebuffer; on success ownership is handed to the caller via
 * plane_config->fb.  Returns early (leaving plane_config->fb NULL) if
 * the plane is disabled or the allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to take over if the plane isn't enabled. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = I915_READ(DSPCNTR(i9xx_plane));

        /* Tiling and 180° rotation bits only exist on gen4+. */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }

                if (val & DISPPLANE_ROTATE_180)
                        plane_config->rotation = DRM_MODE_ROTATE_180;
        }

        /* CHV pipe B additionally supports horizontal mirroring. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
            val & DISPPLANE_MIRROR)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /*
         * The surface base/offset register layout differs per platform:
         * HSW/BDW use DSPOFFSET, gen4+ use DSPTILEOFF/DSPLINOFF depending
         * on tiling, and pre-gen4 only have DSPADDR.
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = I915_READ(DSPOFFSET(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(i9xx_plane));
                else
                        offset = I915_READ(DSPLINOFF(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = I915_READ(DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* PIPESRC encodes (width - 1) << 16 | (height - 1). */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      crtc->base.name, plane->base.name, fb->width, fb->height,
                      fb->format->cpp[0] * 8, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
}
8811
/*
 * Read the CHV DPLL dividers back from DPIO and compute port_clock.
 * m2 is a fixed-point value: the integer part comes from PLL_DW0
 * (shifted up by 22 bits) and the 22-bit fraction from PLL_DW2 when
 * fractional mode is enabled in PLL_DW3.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        vlv_dpio_put(dev_priv);

        /* m1 is either a fixed divide-by-2 or bypassed entirely. */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8845
8846 static enum intel_output_format
8847 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
8848 {
8849         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8850         u32 tmp;
8851
8852         tmp = I915_READ(PIPEMISC(crtc->pipe));
8853
8854         if (tmp & PIPEMISC_YUV420_ENABLE) {
8855                 /* We support 4:2:0 in full blend mode only */
8856                 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
8857
8858                 return INTEL_OUTPUT_FORMAT_YCBCR420;
8859         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8860                 return INTEL_OUTPUT_FORMAT_YCBCR444;
8861         } else {
8862                 return INTEL_OUTPUT_FORMAT_RGB;
8863         }
8864 }
8865
8866 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8867 {
8868         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8869         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8870         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8871         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8872         u32 tmp;
8873
8874         tmp = I915_READ(DSPCNTR(i9xx_plane));
8875
8876         if (tmp & DISPPLANE_GAMMA_ENABLE)
8877                 crtc_state->gamma_enable = true;
8878
8879         if (!HAS_GMCH(dev_priv) &&
8880             tmp & DISPPLANE_PIPE_CSC_ENABLE)
8881                 crtc_state->csc_enable = true;
8882 }
8883
/*
 * Read the full pipe state of a GMCH-style (pre-DDI) platform back
 * into @pipe_config: bpc, color range, gamma/CSC config, timings,
 * source size, panel fitter, pixel multiplier and DPLL state.
 *
 * Returns true if the pipe is enabled and the state was read out,
 * false if the pipe is off or its power domain could not be acquired.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        /* Registers are only readable while the pipe's power domain is up. */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
        pipe_config->master_transcoder = INVALID_TRANSCODER;

        ret = false;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only g4x/VLV/CHV encode the pipe bpc in PIPECONF. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Pixel multiplier readout depends on the platform generation. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        /* Recover port_clock from the platform-specific DPLL state. */
        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
9002
/*
 * Program the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake: pick the nonspread source (CK505 vs internal), and
 * enable/disable the SSC source and the CPU eDP source output based
 * on which panel outputs (LVDS/eDP) are present and whether any DPLL
 * is already consuming the SSC source.  Each source is switched in
 * turn with a 200us settle delay between register writes.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /* On IBX the external CK505 clock chip availability comes from VBT. */
        if (HAS_PCH_IBX(dev_priv)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                u32 temp = I915_READ(PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                      has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = I915_READ(PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                /* Keep the SSC source alive for the DPLL that consumes it. */
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        /* Nothing to do if the register already matches the wanted state. */
        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                DRM_DEBUG_KMS("Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
                DRM_DEBUG_KMS("Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                if (!using_ssc_source) {
                        DRM_DEBUG_KMS("Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        I915_WRITE(PCH_DREF_CONTROL, val);
                        POSTING_READ(PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        /* The stepwise writes must have converged on the precomputed state. */
        BUG_ON(val != final);
}
9169
/*
 * Reset the FDI mPHY by pulsing the IOSFSB reset control bit in
 * SOUTH_CHICKEN2: assert reset, wait (up to 100 us) for the status bit
 * to latch, then de-assert and wait for it to clear again.  Timeouts
 * are logged but not treated as fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	/* Assert the mPHY reset */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	/* De-assert the mPHY reset */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
9190
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the sideband interface
 * (SBI_MPHY).  The raw offsets and values below are opaque workaround
 * constants; the 0x20xx/0x21xx pairs appear to be mirrored per-channel
 * copies of the same fields (NOTE(review): mirroring assumption not
 * verifiable from this file — values must match the workaround tables
 * exactly and should not be altered).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9265
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* FDI always needs the spread clock; force it on if not requested */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	/* LPT-LP has no FDI at all; ignore the FDI request there */
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC unit, but keep PATHALT set while it comes up */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clear PATHALT to let the spread clock through */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Buffer-enable control lives in SBI_GEN0 on LPT-LP, SBI_DBUFF0 otherwise */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9310
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Buffer-enable control lives in SBI_GEN0 on LPT-LP, SBI_DBUFF0 otherwise */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/*
		 * Set PATHALT (and let it settle) before disabling the
		 * SSC unit, mirroring the enable sequence in reverse.
		 */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9336
/*
 * Map a bend amount in steps (-50..+50, multiples of 5) to an index
 * into sscdivintphase[]: -50 -> 0, 0 -> 10, +50 -> 20.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE low-word values per bend step, indexed via
 * BEND_IDX().  Each pair of adjacent steps shares one divider/phase
 * setting; the half-step difference is handled by the dither pattern
 * in lpt_bend_clkout_dp().  Values are presumably taken from the
 * BSpec iCLK programming tables — not derivable from this file.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9362
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	/* Only multiples of 5 within the table range can be programmed */
	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* Odd multiples of 5 use the dither pattern; even ones don't */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Replace only the low word with the table value */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9395
9396 #undef BEND_IDX
9397
9398 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9399 {
9400         u32 fuse_strap = I915_READ(FUSE_STRAP);
9401         u32 ctl = I915_READ(SPLL_CTL);
9402
9403         if ((ctl & SPLL_PLL_ENABLE) == 0)
9404                 return false;
9405
9406         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9407             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9408                 return true;
9409
9410         if (IS_BROADWELL(dev_priv) &&
9411             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9412                 return true;
9413
9414         return false;
9415 }
9416
9417 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9418                                enum intel_dpll_id id)
9419 {
9420         u32 fuse_strap = I915_READ(FUSE_STRAP);
9421         u32 ctl = I915_READ(WRPLL_CTL(id));
9422
9423         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9424                 return false;
9425
9426         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9427                 return true;
9428
9429         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9430             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9431             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9432                 return true;
9433
9434         return false;
9435 }
9436
9437 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9438 {
9439         struct intel_encoder *encoder;
9440         bool has_fdi = false;
9441
9442         for_each_intel_encoder(&dev_priv->drm, encoder) {
9443                 switch (encoder->type) {
9444                 case INTEL_OUTPUT_ANALOG:
9445                         has_fdi = true;
9446                         break;
9447                 default:
9448                         break;
9449                 }
9450         }
9451
9452         /*
9453          * The BIOS may have decided to use the PCH SSC
9454          * reference so we must not disable it until the
9455          * relevant PLLs have stopped relying on it. We'll
9456          * just leave the PCH SSC reference enabled in case
9457          * any active PLL is using it. It will get disabled
9458          * after runtime suspend if we don't have FDI.
9459          *
9460          * TODO: Move the whole reference clock handling
9461          * to the modeset sequence proper so that we can
9462          * actually enable/disable/reconfigure these things
9463          * safely. To do that we need to introduce a real
9464          * clock hierarchy. That would also allow us to do
9465          * clock bending finally.
9466          */
9467         dev_priv->pch_ssc_use = 0;
9468
9469         if (spll_uses_pch_ssc(dev_priv)) {
9470                 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9471                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9472         }
9473
9474         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9475                 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9476                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9477         }
9478
9479         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9480                 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9481                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9482         }
9483
9484         if (dev_priv->pch_ssc_use)
9485                 return;
9486
9487         if (has_fdi) {
9488                 lpt_bend_clkout_dp(dev_priv, 0);
9489                 lpt_enable_clkout_dp(dev_priv, true, true);
9490         } else {
9491                 lpt_disable_clkout_dp(dev_priv);
9492         }
9493 }
9494
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	/* IBX/CPT and LPT PCH generations use different init sequences */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
9505
9506 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9507 {
9508         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9509         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9510         enum pipe pipe = crtc->pipe;
9511         u32 val;
9512
9513         val = 0;
9514
9515         switch (crtc_state->pipe_bpp) {
9516         case 18:
9517                 val |= PIPECONF_6BPC;
9518                 break;
9519         case 24:
9520                 val |= PIPECONF_8BPC;
9521                 break;
9522         case 30:
9523                 val |= PIPECONF_10BPC;
9524                 break;
9525         case 36:
9526                 val |= PIPECONF_12BPC;
9527                 break;
9528         default:
9529                 /* Case prevented by intel_choose_pipe_bpp_dither. */
9530                 BUG();
9531         }
9532
9533         if (crtc_state->dither)
9534                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9535
9536         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9537                 val |= PIPECONF_INTERLACED_ILK;
9538         else
9539                 val |= PIPECONF_PROGRESSIVE;
9540
9541         /*
9542          * This would end up with an odd purple hue over
9543          * the entire display. Make sure we don't do it.
9544          */
9545         WARN_ON(crtc_state->limited_color_range &&
9546                 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
9547
9548         if (crtc_state->limited_color_range)
9549                 val |= PIPECONF_COLOR_RANGE_SELECT;
9550
9551         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9552                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
9553
9554         val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9555
9556         I915_WRITE(PIPECONF(pipe), val);
9557         POSTING_READ(PIPECONF(pipe));
9558 }
9559
9560 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9561 {
9562         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9563         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9564         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9565         u32 val = 0;
9566
9567         if (IS_HASWELL(dev_priv) && crtc_state->dither)
9568                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9569
9570         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9571                 val |= PIPECONF_INTERLACED_ILK;
9572         else
9573                 val |= PIPECONF_PROGRESSIVE;
9574
9575         if (IS_HASWELL(dev_priv) &&
9576             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9577                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9578
9579         I915_WRITE(PIPECONF(cpu_transcoder), val);
9580         POSTING_READ(PIPECONF(cpu_transcoder));
9581 }
9582
9583 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9584 {
9585         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9586         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9587         u32 val = 0;
9588
9589         switch (crtc_state->pipe_bpp) {
9590         case 18:
9591                 val |= PIPEMISC_DITHER_6_BPC;
9592                 break;
9593         case 24:
9594                 val |= PIPEMISC_DITHER_8_BPC;
9595                 break;
9596         case 30:
9597                 val |= PIPEMISC_DITHER_10_BPC;
9598                 break;
9599         case 36:
9600                 val |= PIPEMISC_DITHER_12_BPC;
9601                 break;
9602         default:
9603                 MISSING_CASE(crtc_state->pipe_bpp);
9604                 break;
9605         }
9606
9607         if (crtc_state->dither)
9608                 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9609
9610         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9611             crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9612                 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9613
9614         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9615                 val |= PIPEMISC_YUV420_ENABLE |
9616                         PIPEMISC_YUV420_MODE_FULL_BLEND;
9617
9618         if (INTEL_GEN(dev_priv) >= 11 &&
9619             (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9620                                            BIT(PLANE_CURSOR))) == 0)
9621                 val |= PIPEMISC_HDR_MODE_PRECISION;
9622
9623         I915_WRITE(PIPEMISC(crtc->pipe), val);
9624 }
9625
9626 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9627 {
9628         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9629         u32 tmp;
9630
9631         tmp = I915_READ(PIPEMISC(crtc->pipe));
9632
9633         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9634         case PIPEMISC_DITHER_6_BPC:
9635                 return 18;
9636         case PIPEMISC_DITHER_8_BPC:
9637                 return 24;
9638         case PIPEMISC_DITHER_10_BPC:
9639                 return 30;
9640         case PIPEMISC_DITHER_12_BPC:
9641                 return 36;
9642         default:
9643                 MISSING_CASE(tmp);
9644                 return 0;
9645         }
9646 }
9647
9648 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9649 {
9650         /*
9651          * Account for spread spectrum to avoid
9652          * oversubscribing the link. Max center spread
9653          * is 2.5%; use 5% for safety's sake.
9654          */
9655         u32 bps = target_clock * bpp * 21 / 20;
9656         return DIV_ROUND_UP(bps, link_bw * 8);
9657 }
9658
9659 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9660 {
9661         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9662 }
9663
/*
 * Compute the ILK DPLL (PCH PLL) register values for the given crtc
 * state and store them in crtc_state->dpll_hw_state (dpll, fp0, fp1).
 * @reduced_clock optionally supplies a second (downclocked) divider
 * set for FP1; when NULL, FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same m < factor * n test as ironlake_needs_fb_cb_tune() */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Use the SSC reference for LVDS panels that want spread spectrum */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9765
9766 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9767                                        struct intel_crtc_state *crtc_state)
9768 {
9769         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9770         struct intel_atomic_state *state =
9771                 to_intel_atomic_state(crtc_state->uapi.state);
9772         const struct intel_limit *limit;
9773         int refclk = 120000;
9774
9775         memset(&crtc_state->dpll_hw_state, 0,
9776                sizeof(crtc_state->dpll_hw_state));
9777
9778         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9779         if (!crtc_state->has_pch_encoder)
9780                 return 0;
9781
9782         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9783                 if (intel_panel_use_ssc(dev_priv)) {
9784                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9785                                       dev_priv->vbt.lvds_ssc_freq);
9786                         refclk = dev_priv->vbt.lvds_ssc_freq;
9787                 }
9788
9789                 if (intel_is_dual_link_lvds(dev_priv)) {
9790                         if (refclk == 100000)
9791                                 limit = &intel_limits_ironlake_dual_lvds_100m;
9792                         else
9793                                 limit = &intel_limits_ironlake_dual_lvds;
9794                 } else {
9795                         if (refclk == 100000)
9796                                 limit = &intel_limits_ironlake_single_lvds_100m;
9797                         else
9798                                 limit = &intel_limits_ironlake_single_lvds;
9799                 }
9800         } else {
9801                 limit = &intel_limits_ironlake_dac;
9802         }
9803
9804         if (!crtc_state->clock_set &&
9805             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9806                                 refclk, NULL, &crtc_state->dpll)) {
9807                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9808                 return -EINVAL;
9809         }
9810
9811         ironlake_compute_dpll(crtc, crtc_state, NULL);
9812
9813         if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
9814                 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9815                               pipe_name(crtc->pipe));
9816                 return -EINVAL;
9817         }
9818
9819         return 0;
9820 }
9821
/*
 * Read back the link/data M/N values from the PCH transcoder
 * registers for this pipe into @m_n.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	/* DATA_M1 packs the TU size in the high bits; mask it off here */
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* TU size is stored minus one in the register */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9837
/*
 * Read back link/data M/N values from the CPU transcoder registers
 * into @m_n, and (when @m2_n2 is non-NULL and the transcoder has a
 * second set) the M2/N2 values into @m2_n2.  Pre-gen5 hardware uses
 * the per-pipe G4X registers instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		/* DATA_M1 packs the TU size (minus one) in the high bits */
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		/* Second M/N set only exists on some transcoders */
		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9874
9875 void intel_dp_get_m_n(struct intel_crtc *crtc,
9876                       struct intel_crtc_state *pipe_config)
9877 {
9878         if (pipe_config->has_pch_encoder)
9879                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9880         else
9881                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9882                                              &pipe_config->dp_m_n,
9883                                              &pipe_config->dp_m2_n2);
9884 }
9885
/*
 * Read back the FDI link M/N configuration from the CPU transcoder
 * registers (FDI has no second M2/N2 set, hence the NULL).
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9892
/*
 * Read back the panel fitter state on SKL+: scan this pipe's scalers
 * for one that is enabled but not bound to a plane (i.e. acting as the
 * pipe/panel-fitter scaler), record its window position/size and mark
 * the crtc as a scaler user accordingly.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	u32 ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		/* enabled + no plane selected == pipe (pfit) scaler */
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			scaler_state->scalers[i].in_use = true;
			break;
		}
	}

	/* id == -1 means no pfit scaler was found */
	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}
9923
9924 static void
9925 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9926                                  struct intel_initial_plane_config *plane_config)
9927 {
9928         struct drm_device *dev = crtc->base.dev;
9929         struct drm_i915_private *dev_priv = to_i915(dev);
9930         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9931         enum plane_id plane_id = plane->id;
9932         enum pipe pipe;
9933         u32 val, base, offset, stride_mult, tiling, alpha;
9934         int fourcc, pixel_format;
9935         unsigned int aligned_height;
9936         struct drm_framebuffer *fb;
9937         struct intel_framebuffer *intel_fb;
9938
9939         if (!plane->get_hw_state(plane, &pipe))
9940                 return;
9941
9942         WARN_ON(pipe != crtc->pipe);
9943
9944         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9945         if (!intel_fb) {
9946                 DRM_DEBUG_KMS("failed to alloc fb\n");
9947                 return;
9948         }
9949
9950         fb = &intel_fb->base;
9951
9952         fb->dev = dev;
9953
9954         val = I915_READ(PLANE_CTL(pipe, plane_id));
9955
9956         if (INTEL_GEN(dev_priv) >= 11)
9957                 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9958         else
9959                 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9960
9961         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9962                 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9963                 alpha &= PLANE_COLOR_ALPHA_MASK;
9964         } else {
9965                 alpha = val & PLANE_CTL_ALPHA_MASK;
9966         }
9967
9968         fourcc = skl_format_to_fourcc(pixel_format,
9969                                       val & PLANE_CTL_ORDER_RGBX, alpha);
9970         fb->format = drm_format_info(fourcc);
9971
9972         tiling = val & PLANE_CTL_TILED_MASK;
9973         switch (tiling) {
9974         case PLANE_CTL_TILED_LINEAR:
9975                 fb->modifier = DRM_FORMAT_MOD_LINEAR;
9976                 break;
9977         case PLANE_CTL_TILED_X:
9978                 plane_config->tiling = I915_TILING_X;
9979                 fb->modifier = I915_FORMAT_MOD_X_TILED;
9980                 break;
9981         case PLANE_CTL_TILED_Y:
9982                 plane_config->tiling = I915_TILING_Y;
9983                 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9984                         fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9985                 else
9986                         fb->modifier = I915_FORMAT_MOD_Y_TILED;
9987                 break;
9988         case PLANE_CTL_TILED_YF:
9989                 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9990                         fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9991                 else
9992                         fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9993                 break;
9994         default:
9995                 MISSING_CASE(tiling);
9996                 goto error;
9997         }
9998
9999         /*
10000          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
10001          * while i915 HW rotation is clockwise, thats why this swapping.
10002          */
10003         switch (val & PLANE_CTL_ROTATE_MASK) {
10004         case PLANE_CTL_ROTATE_0:
10005                 plane_config->rotation = DRM_MODE_ROTATE_0;
10006                 break;
10007         case PLANE_CTL_ROTATE_90:
10008                 plane_config->rotation = DRM_MODE_ROTATE_270;
10009                 break;
10010         case PLANE_CTL_ROTATE_180:
10011                 plane_config->rotation = DRM_MODE_ROTATE_180;
10012                 break;
10013         case PLANE_CTL_ROTATE_270:
10014                 plane_config->rotation = DRM_MODE_ROTATE_90;
10015                 break;
10016         }
10017
10018         if (INTEL_GEN(dev_priv) >= 10 &&
10019             val & PLANE_CTL_FLIP_HORIZONTAL)
10020                 plane_config->rotation |= DRM_MODE_REFLECT_X;
10021
10022         base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
10023         plane_config->base = base;
10024
10025         offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
10026
10027         val = I915_READ(PLANE_SIZE(pipe, plane_id));
10028         fb->height = ((val >> 16) & 0xffff) + 1;
10029         fb->width = ((val >> 0) & 0xffff) + 1;
10030
10031         val = I915_READ(PLANE_STRIDE(pipe, plane_id));
10032         stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
10033         fb->pitches[0] = (val & 0x3ff) * stride_mult;
10034
10035         aligned_height = intel_fb_align_height(fb, 0, fb->height);
10036
10037         plane_config->size = fb->pitches[0] * aligned_height;
10038
10039         DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
10040                       crtc->base.name, plane->base.name, fb->width, fb->height,
10041                       fb->format->cpp[0] * 8, base, fb->pitches[0],
10042                       plane_config->size);
10043
10044         plane_config->fb = intel_fb;
10045         return;
10046
10047 error:
10048         kfree(intel_fb);
10049 }
10050
10051 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
10052                                      struct intel_crtc_state *pipe_config)
10053 {
10054         struct drm_device *dev = crtc->base.dev;
10055         struct drm_i915_private *dev_priv = to_i915(dev);
10056         u32 tmp;
10057
10058         tmp = I915_READ(PF_CTL(crtc->pipe));
10059
10060         if (tmp & PF_ENABLE) {
10061                 pipe_config->pch_pfit.enabled = true;
10062                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
10063                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
10064
10065                 /* We currently do not free assignements of panel fitters on
10066                  * ivb/hsw (since we don't use the higher upscaling modes which
10067                  * differentiates them) so just WARN about this case for now. */
10068                 if (IS_GEN(dev_priv, 7)) {
10069                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
10070                                 PF_PIPE_SEL_IVB(crtc->pipe));
10071                 }
10072         }
10073 }
10074
/*
 * Read out the full hardware state of an ILK-style (PCH display) pipe into
 * @pipe_config: pipe enable, bpp, color range/space, gamma/CSC config,
 * FDI/PCH transcoder state and the PCH DPLL feeding it.
 *
 * Returns true if the pipe is enabled. A runtime PM reference for the pipe
 * power domain is held for the duration of the readout.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        /* Bail if the pipe's power well is down; registers would be garbage. */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        /* On ILK the pipe -> CPU transcoder mapping is 1:1 and fixed. */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
        pipe_config->master_transcoder = INVALID_TRANSCODER;

        ret = false;
        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        switch (tmp & PIPECONF_BPC_MASK) {
        case PIPECONF_6BPC:
                pipe_config->pipe_bpp = 18;
                break;
        case PIPECONF_8BPC:
                pipe_config->pipe_bpp = 24;
                break;
        case PIPECONF_10BPC:
                pipe_config->pipe_bpp = 30;
                break;
        case PIPECONF_12BPC:
                pipe_config->pipe_bpp = 36;
                break;
        default:
                break;
        }

        if (tmp & PIPECONF_COLOR_RANGE_SELECT)
                pipe_config->limited_color_range = true;

        switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
        case PIPECONF_OUTPUT_COLORSPACE_YUV601:
        case PIPECONF_OUTPUT_COLORSPACE_YUV709:
                pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                break;
        default:
                pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
                break;
        }

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
                struct intel_shared_dpll *pll;
                enum intel_dpll_id pll_id;

                pipe_config->has_pch_encoder = true;

                tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;

                ironlake_get_fdi_m_n_config(crtc, pipe_config);

                if (HAS_PCH_IBX(dev_priv)) {
                        /*
                         * The pipe->pch transcoder and pch transcoder->pll
                         * mapping is fixed.
                         */
                        pll_id = (enum intel_dpll_id) crtc->pipe;
                } else {
                        /* CPT/PPT: PLL selection is per-transcoder in PCH_DPLL_SEL. */
                        tmp = I915_READ(PCH_DPLL_SEL);
                        if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
                                pll_id = DPLL_ID_PCH_PLL_B;
                        else
                                pll_id= DPLL_ID_PCH_PLL_A;
                }

                pipe_config->shared_dpll =
                        intel_get_shared_dpll_by_id(dev_priv, pll_id);
                pll = pipe_config->shared_dpll;

                WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
                                                &pipe_config->dpll_hw_state));

                tmp = pipe_config->dpll_hw_state.dpll;
                pipe_config->pixel_multiplier =
                        ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
                         >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

                ironlake_pch_clock_get(crtc, pipe_config);
        } else {
                pipe_config->pixel_multiplier = 1;
        }

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        ironlake_get_pfit_config(crtc, pipe_config);

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
10192 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10193                                       struct intel_crtc_state *crtc_state)
10194 {
10195         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10196         struct intel_atomic_state *state =
10197                 to_intel_atomic_state(crtc_state->uapi.state);
10198
10199         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10200             INTEL_GEN(dev_priv) >= 11) {
10201                 struct intel_encoder *encoder =
10202                         intel_get_crtc_new_encoder(state, crtc_state);
10203
10204                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10205                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10206                                       pipe_name(crtc->pipe));
10207                         return -EINVAL;
10208                 }
10209         }
10210
10211         return 0;
10212 }
10213
10214 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
10215                                    enum port port,
10216                                    struct intel_crtc_state *pipe_config)
10217 {
10218         enum intel_dpll_id id;
10219         u32 temp;
10220
10221         temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10222         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10223
10224         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10225                 return;
10226
10227         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10228 }
10229
10230 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
10231                                 enum port port,
10232                                 struct intel_crtc_state *pipe_config)
10233 {
10234         enum phy phy = intel_port_to_phy(dev_priv, port);
10235         enum icl_port_dpll_id port_dpll_id;
10236         enum intel_dpll_id id;
10237         u32 temp;
10238
10239         if (intel_phy_is_combo(dev_priv, phy)) {
10240                 temp = I915_READ(ICL_DPCLKA_CFGCR0) &
10241                         ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
10242                 id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
10243                 port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10244         } else if (intel_phy_is_tc(dev_priv, phy)) {
10245                 u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
10246
10247                 if (clk_sel == DDI_CLK_SEL_MG) {
10248                         id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
10249                                                                     port));
10250                         port_dpll_id = ICL_PORT_DPLL_MG_PHY;
10251                 } else {
10252                         WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
10253                         id = DPLL_ID_ICL_TBTPLL;
10254                         port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10255                 }
10256         } else {
10257                 WARN(1, "Invalid port %x\n", port);
10258                 return;
10259         }
10260
10261         pipe_config->icl_port_dplls[port_dpll_id].pll =
10262                 intel_get_shared_dpll_by_id(dev_priv, id);
10263
10264         icl_set_active_port_dpll(pipe_config, port_dpll_id);
10265 }
10266
10267 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10268                                 enum port port,
10269                                 struct intel_crtc_state *pipe_config)
10270 {
10271         enum intel_dpll_id id;
10272
10273         switch (port) {
10274         case PORT_A:
10275                 id = DPLL_ID_SKL_DPLL0;
10276                 break;
10277         case PORT_B:
10278                 id = DPLL_ID_SKL_DPLL1;
10279                 break;
10280         case PORT_C:
10281                 id = DPLL_ID_SKL_DPLL2;
10282                 break;
10283         default:
10284                 DRM_ERROR("Incorrect port type\n");
10285                 return;
10286         }
10287
10288         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10289 }
10290
10291 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
10292                                 enum port port,
10293                                 struct intel_crtc_state *pipe_config)
10294 {
10295         enum intel_dpll_id id;
10296         u32 temp;
10297
10298         temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10299         id = temp >> (port * 3 + 1);
10300
10301         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
10302                 return;
10303
10304         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10305 }
10306
10307 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
10308                                 enum port port,
10309                                 struct intel_crtc_state *pipe_config)
10310 {
10311         enum intel_dpll_id id;
10312         u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
10313
10314         switch (ddi_pll_sel) {
10315         case PORT_CLK_SEL_WRPLL1:
10316                 id = DPLL_ID_WRPLL1;
10317                 break;
10318         case PORT_CLK_SEL_WRPLL2:
10319                 id = DPLL_ID_WRPLL2;
10320                 break;
10321         case PORT_CLK_SEL_SPLL:
10322                 id = DPLL_ID_SPLL;
10323                 break;
10324         case PORT_CLK_SEL_LCPLL_810:
10325                 id = DPLL_ID_LCPLL_810;
10326                 break;
10327         case PORT_CLK_SEL_LCPLL_1350:
10328                 id = DPLL_ID_LCPLL_1350;
10329                 break;
10330         case PORT_CLK_SEL_LCPLL_2700:
10331                 id = DPLL_ID_LCPLL_2700;
10332                 break;
10333         default:
10334                 MISSING_CASE(ddi_pll_sel);
10335                 /* fall through */
10336         case PORT_CLK_SEL_NONE:
10337                 return;
10338         }
10339
10340         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10341 }
10342
/*
 * Determine which CPU transcoder drives @crtc (including the eDP and DSI
 * panel transcoders, whose pipe mapping is programmable) and whether it is
 * enabled.
 *
 * On success, a runtime PM reference for the transcoder's power domain is
 * stored in @wakerefs and recorded in @power_domain_mask; the caller is
 * responsible for releasing it.
 *
 * Returns true if the transcoder is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     u64 *power_domain_mask,
                                     intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        unsigned long panel_transcoder_mask = 0;
        unsigned long enabled_panel_transcoders = 0;
        enum transcoder panel_transcoder;
        intel_wakeref_t wf;
        u32 tmp;

        if (INTEL_GEN(dev_priv) >= 11)
                panel_transcoder_mask |=
                        BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

        if (HAS_TRANSCODER_EDP(dev_priv))
                panel_transcoder_mask |= BIT(TRANSCODER_EDP);

        /*
         * The pipe->transcoder mapping is fixed with the exception of the eDP
         * and DSI transcoders handled below.
         */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

        /*
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
        for_each_set_bit(panel_transcoder,
                         &panel_transcoder_mask,
                         ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
                bool force_thru = false;
                enum pipe trans_pipe;

                tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
                if (!(tmp & TRANS_DDI_FUNC_ENABLE))
                        continue;

                /*
                 * Log all enabled ones, only use the first one.
                 *
                 * FIXME: This won't work for two separate DSI displays.
                 */
                enabled_panel_transcoders |= BIT(panel_transcoder);
                if (enabled_panel_transcoders != BIT(panel_transcoder))
                        continue;

                /* Decode which pipe the panel transcoder is attached to. */
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
                        WARN(1, "unknown pipe linked to transcoder %s\n",
                             transcoder_name(panel_transcoder));
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                        force_thru = true;
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_pipe = PIPE_A;
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
                        trans_pipe = PIPE_B;
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
                        trans_pipe = PIPE_C;
                        break;
                }

                if (trans_pipe == crtc->pipe) {
                        pipe_config->cpu_transcoder = panel_transcoder;
                        pipe_config->pch_pfit.force_thru = force_thru;
                }
        }

        /*
         * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
         */
        WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
                enabled_panel_transcoders != BIT(TRANSCODER_EDP));

        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        /* Hand the wakeref to the caller for release after readout. */
        wakerefs[power_domain] = wf;
        *power_domain_mask |= BIT_ULL(power_domain);

        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

        return tmp & PIPECONF_ENABLE;
}
10438
/*
 * Check whether @crtc is driven by one of the BXT DSI transcoders and, if
 * so, record it as the CPU transcoder in @pipe_config.
 *
 * Wakerefs for any DSI transcoder power domains that were enabled are
 * stored in @wakerefs / @power_domain_mask for the caller to release.
 *
 * Returns true if a DSI transcoder is attached to this crtc.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         u64 *power_domain_mask,
                                         intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum transcoder cpu_transcoder;
        intel_wakeref_t wf;
        enum port port;
        u32 tmp;

        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                /* BXT maps DSI port A -> transcoder DSI_A, port C -> DSI_C. */
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

                wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
                if (!wf)
                        continue;

                wakerefs[power_domain] = wf;
                *power_domain_mask |= BIT_ULL(power_domain);

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                /* Skip ports wired to a different pipe. */
                tmp = I915_READ(MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10493
/*
 * Read out the DDI port state for @crtc's transcoder: which port it drives,
 * which shared DPLL clocks that port (platform-specific lookup), and - on
 * HSW/BDW - whether the port uses the PCH/FDI path.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
                                       struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_shared_dpll *pll;
        enum port port;
        u32 tmp;

        tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

        /* The port select field layout changed on gen12. */
        if (INTEL_GEN(dev_priv) >= 12)
                port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
        else
                port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);

        if (INTEL_GEN(dev_priv) >= 11)
                icelake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_BC(dev_priv))
                skylake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_LP(dev_priv))
                bxt_get_ddi_pll(dev_priv, port, pipe_config);
        else
                haswell_get_ddi_pll(dev_priv, port, pipe_config);

        pll = pipe_config->shared_dpll;
        if (pll) {
                WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
                                                &pipe_config->dpll_hw_state));
        }

        /*
         * Haswell has only FDI/PCH transcoder A. It is the one connected to
         * DDI E. So just check whether this pipe is wired to DDI E and whether
         * the PCH transcoder is on.
         */
        if (INTEL_GEN(dev_priv) < 9 &&
            (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;

                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;

                ironlake_get_fdi_m_n_config(crtc, pipe_config);
        }
}
10542
10543 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
10544                                                  enum transcoder cpu_transcoder)
10545 {
10546         u32 trans_port_sync, master_select;
10547
10548         trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
10549
10550         if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
10551                 return INVALID_TRANSCODER;
10552
10553         master_select = trans_port_sync &
10554                         PORT_SYNC_MODE_MASTER_SELECT_MASK;
10555         if (master_select == 0)
10556                 return TRANSCODER_EDP;
10557         else
10558                 return master_select - 1;
10559 }
10560
/*
 * Read out the transcoder port sync configuration for @crtc_state:
 * which transcoder (if any) is our master, and which transcoders are
 * slaved to us. Fills master_transcoder and sync_mode_slaves_mask.
 */
static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        u32 transcoders;
        enum transcoder cpu_transcoder;

        crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
                                                                  crtc_state->cpu_transcoder);

        /* Scan the other transcoders for any that name us as their master. */
        transcoders = BIT(TRANSCODER_A) |
                BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) |
                BIT(TRANSCODER_D);
        for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t trans_wakeref;

                /* Skip transcoders whose power domain is off. */
                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                                   power_domain);

                if (!trans_wakeref)
                        continue;

                if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
                    crtc_state->cpu_transcoder)
                        crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

                intel_display_power_put(dev_priv, power_domain, trans_wakeref);
        }

        /* A transcoder cannot be both a slave and have slaves of its own. */
        WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
                crtc_state->sync_mode_slaves_mask);
}
10595
/*
 * Read out the full hardware state of an HSW+ pipe into @pipe_config:
 * transcoder assignment and enable, DDI port/PLL state, timings, output
 * format, gamma/CSC, panel fitter, IPS, pixel multiplier and (gen11+)
 * transcoder port sync state.
 *
 * Power domain references taken during readout (pipe, transcoder, panel
 * fitter) are collected in the local wakerefs[] and released at the end.
 *
 * Returns true if the pipe is active.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
        enum intel_display_power_domain power_domain;
        u64 power_domain_mask;
        bool active;

        intel_crtc_init_scalers(crtc, pipe_config);

        pipe_config->master_transcoder = INVALID_TRANSCODER;

        /* Bail if the pipe's power well is down. */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        wakerefs[power_domain] = wf;
        power_domain_mask = BIT_ULL(power_domain);

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config,
                                          &power_domain_mask, wakerefs);

        /* BXT DSI bypasses the regular transcoders; check it separately. */
        if (IS_GEN9_LP(dev_priv) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config,
                                         &power_domain_mask, wakerefs)) {
                WARN_ON(active);
                active = true;
        }

        if (!active)
                goto out;

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            INTEL_GEN(dev_priv) >= 11) {
                haswell_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }

        intel_get_pipe_src_size(crtc, pipe_config);

        /* HSW reports the output colorspace in PIPECONF; BDW+ in PIPEMISC. */
        if (IS_HASWELL(dev_priv)) {
                u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

                if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                else
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        } else {
                pipe_config->output_format =
                        bdw_get_pipemisc_output_format(crtc);

                /*
                 * Currently there is no interface defined to
                 * check user preference between RGB/YCBCR444
                 * or YCBCR420. So the only possible case for
                 * YCBCR444 usage is driving YCBCR420 output
                 * with LSPCON, when pipe is configured for
                 * YCBCR444 output and LSPCON takes care of
                 * downsampling it.
                 */
                pipe_config->lspcon_downsampling =
                        pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
        }

        pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

        pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

        if (INTEL_GEN(dev_priv) >= 9) {
                u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
                        pipe_config->gamma_enable = true;

                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
                        pipe_config->csc_enable = true;
        } else {
                i9xx_get_pipe_color_config(pipe_config);
        }

        intel_color_get_config(pipe_config);

        /* The panel fitter sits in its own power domain. */
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        WARN_ON(power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wf) {
                wakerefs[power_domain] = wf;
                power_domain_mask |= BIT_ULL(power_domain);

                if (INTEL_GEN(dev_priv) >= 9)
                        skylake_get_pfit_config(crtc, pipe_config);
                else
                        ironlake_get_pfit_config(crtc, pipe_config);
        }

        if (hsw_crtc_supports_ips(crtc)) {
                if (IS_HASWELL(dev_priv))
                        pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
                else {
                        /*
                         * We cannot readout IPS state on broadwell, set to
                         * true so we can set it to a defined state on first
                         * commit.
                         */
                        pipe_config->ips_enabled = true;
                }
        }

        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

        if (INTEL_GEN(dev_priv) >= 11 &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder))
                icelake_get_trans_port_sync_config(pipe_config);

out:
        /* Drop every power reference collected during readout. */
        for_each_power_domain(power_domain, power_domain_mask)
                intel_display_power_put(dev_priv,
                                        power_domain, wakerefs[power_domain]);

        return active;
}
10728
10729 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10730 {
10731         struct drm_i915_private *dev_priv =
10732                 to_i915(plane_state->uapi.plane->dev);
10733         const struct drm_framebuffer *fb = plane_state->hw.fb;
10734         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10735         u32 base;
10736
10737         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10738                 base = obj->phys_handle->busaddr;
10739         else
10740                 base = intel_plane_ggtt_offset(plane_state);
10741
10742         return base + plane_state->color_plane[0].offset;
10743 }
10744
10745 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10746 {
10747         int x = plane_state->uapi.dst.x1;
10748         int y = plane_state->uapi.dst.y1;
10749         u32 pos = 0;
10750
10751         if (x < 0) {
10752                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10753                 x = -x;
10754         }
10755         pos |= x << CURSOR_X_SHIFT;
10756
10757         if (y < 0) {
10758                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10759                 y = -y;
10760         }
10761         pos |= y << CURSOR_Y_SHIFT;
10762
10763         return pos;
10764 }
10765
10766 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10767 {
10768         const struct drm_mode_config *config =
10769                 &plane_state->uapi.plane->dev->mode_config;
10770         int width = drm_rect_width(&plane_state->uapi.dst);
10771         int height = drm_rect_height(&plane_state->uapi.dst);
10772
10773         return width > 0 && width <= config->cursor_width &&
10774                 height > 0 && height <= config->cursor_height;
10775 }
10776
/*
 * Pin/compute the cursor surface and fill in color_plane[0] offset/x/y.
 *
 * Returns 0 on success or a negative error code. Cursor hardware cannot
 * pan within the buffer, so any residual src_x/src_y after offset
 * alignment is rejected with -EINVAL.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Invisible plane: nothing to compute. */
	if (!plane_state->uapi.visible)
		return 0;

	/* src coordinates are 16.16 fixed point; take the integer part. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	/* Any leftover x/y would require panning, which cursors can't do. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* 180° rotation: point the base at the last pixel. */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
10827
10828 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10829                               struct intel_plane_state *plane_state)
10830 {
10831         const struct drm_framebuffer *fb = plane_state->hw.fb;
10832         int ret;
10833
10834         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10835                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10836                 return -EINVAL;
10837         }
10838
10839         ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
10840                                                   &crtc_state->uapi,
10841                                                   DRM_PLANE_HELPER_NO_SCALING,
10842                                                   DRM_PLANE_HELPER_NO_SCALING,
10843                                                   true, true);
10844         if (ret)
10845                 return ret;
10846
10847         /* Use the unclipped src/dst rectangles, which we program to hw */
10848         plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
10849         plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
10850
10851         ret = intel_cursor_check_surface(plane_state);
10852         if (ret)
10853                 return ret;
10854
10855         if (!plane_state->uapi.visible)
10856                 return 0;
10857
10858         ret = intel_plane_check_src_coordinates(plane_state);
10859         if (ret)
10860                 return ret;
10861
10862         return 0;
10863 }
10864
10865 static unsigned int
10866 i845_cursor_max_stride(struct intel_plane *plane,
10867                        u32 pixel_format, u64 modifier,
10868                        unsigned int rotation)
10869 {
10870         return 2048;
10871 }
10872
10873 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10874 {
10875         u32 cntl = 0;
10876
10877         if (crtc_state->gamma_enable)
10878                 cntl |= CURSOR_GAMMA_ENABLE;
10879
10880         return cntl;
10881 }
10882
10883 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10884                            const struct intel_plane_state *plane_state)
10885 {
10886         return CURSOR_ENABLE |
10887                 CURSOR_FORMAT_ARGB |
10888                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10889 }
10890
10891 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10892 {
10893         int width = drm_rect_width(&plane_state->uapi.dst);
10894
10895         /*
10896          * 845g/865g are only limited by the width of their cursors,
10897          * the height is arbitrary up to the precision of the register.
10898          */
10899         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10900 }
10901
10902 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10903                              struct intel_plane_state *plane_state)
10904 {
10905         const struct drm_framebuffer *fb = plane_state->hw.fb;
10906         int ret;
10907
10908         ret = intel_check_cursor(crtc_state, plane_state);
10909         if (ret)
10910                 return ret;
10911
10912         /* if we want to turn off the cursor ignore width and height */
10913         if (!fb)
10914                 return 0;
10915
10916         /* Check for which cursor types we support */
10917         if (!i845_cursor_size_ok(plane_state)) {
10918                 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10919                           drm_rect_width(&plane_state->uapi.dst),
10920                           drm_rect_height(&plane_state->uapi.dst));
10921                 return -EINVAL;
10922         }
10923
10924         WARN_ON(plane_state->uapi.visible &&
10925                 plane_state->color_plane[0].stride != fb->pitches[0]);
10926
10927         switch (fb->pitches[0]) {
10928         case 256:
10929         case 512:
10930         case 1024:
10931         case 2048:
10932                 break;
10933         default:
10934                 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10935                               fb->pitches[0]);
10936                 return -EINVAL;
10937         }
10938
10939         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10940
10941         return 0;
10942 }
10943
/*
 * Program the i845/i865 cursor registers. With plane_state == NULL (or an
 * invisible plane) all values stay zero, which disables the cursor.
 * Register writes are done under the uncore lock with I915_WRITE_FW.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		/* CURSIZE packs height in bits 12+ and width below. */
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, reprogram, then re-enable with new cntl. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache the programmed values so fast-path moves can skip this. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed: a single CURPOS write suffices. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10988
10989 static void i845_disable_cursor(struct intel_plane *plane,
10990                                 const struct intel_crtc_state *crtc_state)
10991 {
10992         i845_update_cursor(plane, crtc_state, NULL);
10993 }
10994
10995 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10996                                      enum pipe *pipe)
10997 {
10998         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10999         enum intel_display_power_domain power_domain;
11000         intel_wakeref_t wakeref;
11001         bool ret;
11002
11003         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11004         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11005         if (!wakeref)
11006                 return false;
11007
11008         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11009
11010         *pipe = PIPE_A;
11011
11012         intel_display_power_put(dev_priv, power_domain, wakeref);
11013
11014         return ret;
11015 }
11016
11017 static unsigned int
11018 i9xx_cursor_max_stride(struct intel_plane *plane,
11019                        u32 pixel_format, u64 modifier,
11020                        unsigned int rotation)
11021 {
11022         return plane->base.dev->mode_config.cursor_width * 4;
11023 }
11024
11025 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11026 {
11027         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11028         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11029         u32 cntl = 0;
11030
11031         if (INTEL_GEN(dev_priv) >= 11)
11032                 return cntl;
11033
11034         if (crtc_state->gamma_enable)
11035                 cntl = MCURSOR_GAMMA_ENABLE;
11036
11037         if (crtc_state->csc_enable)
11038                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11039
11040         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11041                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11042
11043         return cntl;
11044 }
11045
11046 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11047                            const struct intel_plane_state *plane_state)
11048 {
11049         struct drm_i915_private *dev_priv =
11050                 to_i915(plane_state->uapi.plane->dev);
11051         u32 cntl = 0;
11052
11053         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11054                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11055
11056         switch (drm_rect_width(&plane_state->uapi.dst)) {
11057         case 64:
11058                 cntl |= MCURSOR_MODE_64_ARGB_AX;
11059                 break;
11060         case 128:
11061                 cntl |= MCURSOR_MODE_128_ARGB_AX;
11062                 break;
11063         case 256:
11064                 cntl |= MCURSOR_MODE_256_ARGB_AX;
11065                 break;
11066         default:
11067                 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11068                 return 0;
11069         }
11070
11071         if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11072                 cntl |= MCURSOR_ROTATE_180;
11073
11074         return cntl;
11075 }
11076
11077 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11078 {
11079         struct drm_i915_private *dev_priv =
11080                 to_i915(plane_state->uapi.plane->dev);
11081         int width = drm_rect_width(&plane_state->uapi.dst);
11082         int height = drm_rect_height(&plane_state->uapi.dst);
11083
11084         if (!intel_cursor_size_ok(plane_state))
11085                 return false;
11086
11087         /* Cursor width is limited to a few power-of-two sizes */
11088         switch (width) {
11089         case 256:
11090         case 128:
11091         case 64:
11092                 break;
11093         default:
11094                 return false;
11095         }
11096
11097         /*
11098          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11099          * height from 8 lines up to the cursor width, when the
11100          * cursor is not rotated. Everything else requires square
11101          * cursors.
11102          */
11103         if (HAS_CUR_FBC(dev_priv) &&
11104             plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11105                 if (height < 8 || height > width)
11106                         return false;
11107         } else {
11108                 if (height != width)
11109                         return false;
11110         }
11111
11112         return true;
11113 }
11114
/*
 * i9xx+ specific cursor plane atomic check: size, stride and the CHV
 * pipe C workaround, then bake the control word into plane_state->ctl.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  drm_rect_width(&plane_state->uapi.dst),
			  drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The stride must match the cursor width exactly (4 bytes/pixel). */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0],
			      drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
11171
/*
 * Program the i9xx+ cursor registers. A NULL or invisible plane_state
 * leaves all values zero and thus disables the cursor. Writes are done
 * under the uncore lock; see below for the register arming rules.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursor: enable CUR_FBC_CTL with the height. */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* Cache what we programmed so position-only moves can skip this. */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11239
11240 static void i9xx_disable_cursor(struct intel_plane *plane,
11241                                 const struct intel_crtc_state *crtc_state)
11242 {
11243         i9xx_update_cursor(plane, crtc_state, NULL);
11244 }
11245
11246 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11247                                      enum pipe *pipe)
11248 {
11249         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11250         enum intel_display_power_domain power_domain;
11251         intel_wakeref_t wakeref;
11252         bool ret;
11253         u32 val;
11254
11255         /*
11256          * Not 100% correct for planes that can move between pipes,
11257          * but that's only the case for gen2-3 which don't have any
11258          * display power wells.
11259          */
11260         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11261         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11262         if (!wakeref)
11263                 return false;
11264
11265         val = I915_READ(CURCNTR(plane->pipe));
11266
11267         ret = val & MCURSOR_MODE;
11268
11269         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11270                 *pipe = plane->pipe;
11271         else
11272                 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11273                         MCURSOR_PIPE_SELECT_SHIFT;
11274
11275         intel_display_power_put(dev_priv, power_domain, wakeref);
11276
11277         return ret;
11278 }
11279
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11285
11286 struct drm_framebuffer *
11287 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11288                          struct drm_mode_fb_cmd2 *mode_cmd)
11289 {
11290         struct intel_framebuffer *intel_fb;
11291         int ret;
11292
11293         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11294         if (!intel_fb)
11295                 return ERR_PTR(-ENOMEM);
11296
11297         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11298         if (ret)
11299                 goto err;
11300
11301         return &intel_fb->base;
11302
11303 err:
11304         kfree(intel_fb);
11305         return ERR_PTR(ret);
11306 }
11307
11308 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11309                                         struct drm_crtc *crtc)
11310 {
11311         struct drm_plane *plane;
11312         struct drm_plane_state *plane_state;
11313         int ret, i;
11314
11315         ret = drm_atomic_add_affected_planes(state, crtc);
11316         if (ret)
11317                 return ret;
11318
11319         for_each_new_plane_in_state(state, plane, plane_state, i) {
11320                 if (plane_state->crtc != crtc)
11321                         continue;
11322
11323                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11324                 if (ret)
11325                         return ret;
11326
11327                 drm_atomic_set_fb_for_plane(plane_state, NULL);
11328         }
11329
11330         return 0;
11331 }
11332
/*
 * Light up a pipe with a fixed VESA mode so a connector's load-detect
 * logic can run, saving the pre-existing state into @old so
 * intel_release_load_detect_pipe() can restore it.
 *
 * NOTE(review): despite the int return type, this returns true/false
 * (pipe acquired or not) except for -EDEADLK, which must be propagated
 * so the caller can back off and retry the locking.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already driving something: unlock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to apply the detect mode, one to restore afterwards. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc/plane state for restoring. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* Ownership of restore_state passes to @old on success. */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* Deadlock must be reported so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
11487
11488 void intel_release_load_detect_pipe(struct drm_connector *connector,
11489                                     struct intel_load_detect_pipe *old,
11490                                     struct drm_modeset_acquire_ctx *ctx)
11491 {
11492         struct intel_encoder *intel_encoder =
11493                 intel_attached_encoder(connector);
11494         struct drm_encoder *encoder = &intel_encoder->base;
11495         struct drm_atomic_state *state = old->restore_state;
11496         int ret;
11497
11498         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11499                       connector->base.id, connector->name,
11500                       encoder->base.id, encoder->name);
11501
11502         if (!state)
11503                 return;
11504
11505         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11506         if (ret)
11507                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11508         drm_atomic_state_put(state);
11509 }
11510
11511 static int i9xx_pll_refclk(struct drm_device *dev,
11512                            const struct intel_crtc_state *pipe_config)
11513 {
11514         struct drm_i915_private *dev_priv = to_i915(dev);
11515         u32 dpll = pipe_config->dpll_hw_state.dpll;
11516
11517         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11518                 return dev_priv->vbt.lvds_ssc_freq;
11519         else if (HAS_PCH_SPLIT(dev_priv))
11520                 return 120000;
11521         else if (!IS_GEN(dev_priv, 2))
11522                 return 96000;
11523         else
11524                 return 48000;
11525 }
11526
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values previously read into
 * pipe_config->dpll_hw_state back into m1/m2/n/p1/p2 dividers and
 * computes port_clock from them. Writes the result to
 * pipe_config->port_clock; on an unrecognized DPLL mode it returns
 * early and leaves port_clock untouched.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* Pick whichever FP register (FPA0 vs FPA1) the DPLL is using. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                /* Pineview encodes N as a one-hot field; ffs()-1 recovers it. */
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN(dev_priv, 2)) {
                /* gen3+: P1 is stored as a one-hot bitfield. */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the DPLL operating mode (DAC vs LVDS). */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /*
                 * gen2: LVDS (pipe B only) has its own divider encoding.
                 * i830 has no LVDS register at all, so skip the read there.
                 */
                u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        /* Second LVDS channel powered up -> dual channel. */
                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
11616
11617 int intel_dotclock_calculate(int link_freq,
11618                              const struct intel_link_m_n *m_n)
11619 {
11620         /*
11621          * The calculation for the data clock is:
11622          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11623          * But we want to avoid losing precison if possible, so:
11624          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11625          *
11626          * and the link clock is simpler:
11627          * link_clock = (m * link_clock) / n
11628          */
11629
11630         if (!m_n->link_n)
11631                 return 0;
11632
11633         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11634 }
11635
11636 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11637                                    struct intel_crtc_state *pipe_config)
11638 {
11639         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11640
11641         /* read out port_clock from the DPLL */
11642         i9xx_crtc_clock_get(crtc, pipe_config);
11643
11644         /*
11645          * In case there is an active pipe without active ports,
11646          * we may need some idea for the dotclock anyway.
11647          * Calculate one based on the FDI configuration.
11648          */
11649         pipe_config->hw.adjusted_mode.crtc_clock =
11650                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11651                                          &pipe_config->fdi_m_n);
11652 }
11653
11654 /* Returns the currently programmed mode of the given encoder. */
11655 struct drm_display_mode *
11656 intel_encoder_current_mode(struct intel_encoder *encoder)
11657 {
11658         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11659         struct intel_crtc_state *crtc_state;
11660         struct drm_display_mode *mode;
11661         struct intel_crtc *crtc;
11662         enum pipe pipe;
11663
11664         if (!encoder->get_hw_state(encoder, &pipe))
11665                 return NULL;
11666
11667         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11668
11669         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11670         if (!mode)
11671                 return NULL;
11672
11673         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11674         if (!crtc_state) {
11675                 kfree(mode);
11676                 return NULL;
11677         }
11678
11679         crtc_state->uapi.crtc = &crtc->base;
11680
11681         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11682                 kfree(crtc_state);
11683                 kfree(mode);
11684                 return NULL;
11685         }
11686
11687         encoder->get_config(encoder, crtc_state);
11688
11689         intel_mode_from_pipe_config(mode, crtc_state);
11690
11691         kfree(crtc_state);
11692
11693         return mode;
11694 }
11695
/* CRTC destroy hook: tear down DRM core state, then free our wrapper. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
        /* drm_crtc_cleanup() does not free @crtc, so the cast stays valid. */
        drm_crtc_cleanup(crtc);
        kfree(to_intel_crtc(crtc));
}
11703
11704 /**
11705  * intel_wm_need_update - Check whether watermarks need updating
11706  * @cur: current plane state
11707  * @new: new plane state
11708  *
11709  * Check current plane state versus the new one to determine whether
11710  * watermarks need to be recalculated.
11711  *
11712  * Returns true or false.
11713  */
11714 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11715                                  struct intel_plane_state *new)
11716 {
11717         /* Update watermarks on tiling or size changes. */
11718         if (new->uapi.visible != cur->uapi.visible)
11719                 return true;
11720
11721         if (!cur->hw.fb || !new->hw.fb)
11722                 return false;
11723
11724         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11725             cur->hw.rotation != new->hw.rotation ||
11726             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11727             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11728             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11729             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
11730                 return true;
11731
11732         return false;
11733 }
11734
11735 static bool needs_scaling(const struct intel_plane_state *state)
11736 {
11737         int src_w = drm_rect_width(&state->uapi.src) >> 16;
11738         int src_h = drm_rect_height(&state->uapi.src) >> 16;
11739         int dst_w = drm_rect_width(&state->uapi.dst);
11740         int dst_h = drm_rect_height(&state->uapi.dst);
11741
11742         return (src_w != dst_w || src_h != dst_h);
11743 }
11744
/*
 * intel_plane_atomic_calc_changes - derive crtc-level state from a plane update
 * @old_crtc_state: crtc state before the update
 * @crtc_state: new crtc state being computed (flags updated in place)
 * @old_plane_state: plane state before the update
 * @plane_state: new plane state
 *
 * Based on how this plane's visibility, size and scaling change, set the
 * watermark (update_wm_pre/post), cxsr (disable_cxsr), LP watermark
 * (disable_lp_wm) and frontbuffer-tracking flags on @crtc_state, and set
 * up the skl+ plane scaler if needed.
 *
 * Returns 0 on success, or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct intel_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct intel_plane_state *plane_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = old_crtc_state->hw.active;
        bool is_crtc_enabled = crtc_state->hw.active;
        bool turn_off, turn_on, visible, was_visible;
        int ret;

        /* skl+: allocate/update a hw scaler for non-cursor planes. */
        if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(crtc_state, plane_state);
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->uapi.visible;
        visible = plane_state->uapi.visible;

        /* A plane cannot have been visible on an inactive crtc. */
        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                plane_state->uapi.visible = visible = false;
                crtc_state->active_planes &= ~BIT(plane->id);
                crtc_state->data_rate[plane->id] = 0;
                crtc_state->min_cdclk[plane->id] = 0;
        }

        /* Invisible before and after: nothing to recompute. */
        if (!was_visible && !visible)
                return 0;

        /* A full modeset implicitly turns the plane off and back on. */
        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                         crtc->base.base.id, crtc->base.name,
                         plane->base.base.id, plane->base.name,
                         was_visible, visible,
                         turn_off, turn_on, mode_changed);

        if (turn_on) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (turn_off) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (intel_wm_need_update(old_plane_state, plane_state)) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        crtc_state->update_wm_pre = true;
                        crtc_state->update_wm_post = true;
                }
        }

        /* Mark the plane for frontbuffer tracking/flip handling. */
        if (visible || was_visible)
                crtc_state->fb_bits |= plane->frontbuffer_bit;

        /*
         * ILK/SNB DVSACNTR/Sprite Enable
         * IVB SPR_CTL/Sprite Enable
         * "When in Self Refresh Big FIFO mode, a write to enable the
         *  plane will be internally buffered and delayed while Big FIFO
         *  mode is exiting."
         *
         * Which means that enabling the sprite can take an extra frame
         * when we start in big FIFO mode (LP1+). Thus we need to drop
         * down to LP0 and wait for vblank in order to make sure the
         * sprite gets enabled on the next vblank after the register write.
         * Doing otherwise would risk enabling the sprite one frame after
         * we've already signalled flip completion. We can resume LP1+
         * once the sprite has been enabled.
         *
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         * IVB SPR_SCALE/Scaling Enable
         * "Low Power watermarks must be disabled for at least one
         *  frame before enabling sprite scaling, and kept disabled
         *  until sprite scaling is disabled."
         *
         * ILK/SNB DVSASCALE/Scaling Enable
         * "When in Self Refresh Big FIFO mode, scaling enable will be
         *  masked off while Big FIFO mode is exiting."
         *
         * Despite the w/a only being listed for IVB we assume that
         * the ILK/SNB note has similar ramifications, hence we apply
         * the w/a on all three platforms.
         *
         * With experimental results seems this is needed also for primary
         * plane, not only sprite plane.
         */
        if (plane->id != PLANE_CURSOR &&
            (IS_GEN_RANGE(dev_priv, 5, 6) ||
             IS_IVYBRIDGE(dev_priv)) &&
            (turn_on || (!needs_scaling(old_plane_state) &&
                         needs_scaling(plane_state))))
                crtc_state->disable_lp_wm = true;

        return 0;
}
11867
11868 static bool encoders_cloneable(const struct intel_encoder *a,
11869                                const struct intel_encoder *b)
11870 {
11871         /* masks could be asymmetric, so check both ways */
11872         return a == b || (a->cloneable & (1 << b->type) &&
11873                           b->cloneable & (1 << a->type));
11874 }
11875
11876 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11877                                          struct intel_crtc *crtc,
11878                                          struct intel_encoder *encoder)
11879 {
11880         struct intel_encoder *source_encoder;
11881         struct drm_connector *connector;
11882         struct drm_connector_state *connector_state;
11883         int i;
11884
11885         for_each_new_connector_in_state(state, connector, connector_state, i) {
11886                 if (connector_state->crtc != &crtc->base)
11887                         continue;
11888
11889                 source_encoder =
11890                         to_intel_encoder(connector_state->best_encoder);
11891                 if (!encoders_cloneable(encoder, source_encoder))
11892                         return false;
11893         }
11894
11895         return true;
11896 }
11897
11898 static int icl_add_linked_planes(struct intel_atomic_state *state)
11899 {
11900         struct intel_plane *plane, *linked;
11901         struct intel_plane_state *plane_state, *linked_plane_state;
11902         int i;
11903
11904         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11905                 linked = plane_state->planar_linked_plane;
11906
11907                 if (!linked)
11908                         continue;
11909
11910                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11911                 if (IS_ERR(linked_plane_state))
11912                         return PTR_ERR(linked_plane_state);
11913
11914                 WARN_ON(linked_plane_state->planar_linked_plane != plane);
11915                 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
11916         }
11917
11918         return 0;
11919 }
11920
/*
 * icl_check_nv12_planes - (re)assign Y planes to planar (NV12) planes, gen11+
 * @crtc_state: new crtc state being checked
 *
 * On gen11+ a planar YUV framebuffer is scanned out using two hardware
 * planes: a UV master and a Y slave. This tears down all existing links
 * for the crtc's planes, then picks a currently unused Y-capable plane
 * for each plane in crtc_state->nv12_planes and copies the relevant
 * parameters to it.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or a
 * negative error code if acquiring a plane state fails.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state;
        int i;

        if (INTEL_GEN(dev_priv) < 11)
                return 0;

        /*
         * Destroy all old plane links and make the slave plane invisible
         * in the crtc_state->active_planes mask.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
                        continue;

                plane_state->planar_linked_plane = NULL;
                if (plane_state->planar_slave && !plane_state->uapi.visible) {
                        crtc_state->active_planes &= ~BIT(plane->id);
                        crtc_state->update_planes |= BIT(plane->id);
                }

                plane_state->planar_slave = false;
        }

        /* No planar planes on this crtc: only the teardown above is needed. */
        if (!crtc_state->nv12_planes)
                return 0;

        /* Assign a free Y-capable plane to each planar master plane. */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *linked_state = NULL;

                if (plane->pipe != crtc->pipe ||
                    !(crtc_state->nv12_planes & BIT(plane->id)))
                        continue;

                /* Find a Y-capable plane that is not already in use. */
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
                        if (!icl_is_nv12_y_plane(linked->id))
                                continue;

                        if (crtc_state->active_planes & BIT(linked->id))
                                continue;

                        linked_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_state))
                                return PTR_ERR(linked_state);

                        break;
                }

                if (!linked_state) {
                        DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
                                      hweight8(crtc_state->nv12_planes));

                        return -EINVAL;
                }

                plane_state->planar_linked_plane = linked;

                linked_state->planar_slave = true;
                linked_state->planar_linked_plane = plane;
                crtc_state->active_planes |= BIT(linked->id);
                crtc_state->update_planes |= BIT(linked->id);
                DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);

                /* Copy parameters to slave plane */
                linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
                linked_state->color_ctl = plane_state->color_ctl;
                linked_state->color_plane[0] = plane_state->color_plane[0];

                intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
                linked_state->uapi.src = plane_state->uapi.src;
                linked_state->uapi.dst = plane_state->uapi.dst;

                /*
                 * For HDR planes the chroma upsampler control on the master
                 * must select the linked Y plane.
                 */
                if (icl_is_hdr_plane(dev_priv, plane->id)) {
                        if (linked->id == PLANE_SPRITE5)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
                        else if (linked->id == PLANE_SPRITE4)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
                        else
                                MISSING_CASE(linked->id);
                }
        }

        return 0;
}
12010
12011 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12012 {
12013         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12014         struct intel_atomic_state *state =
12015                 to_intel_atomic_state(new_crtc_state->uapi.state);
12016         const struct intel_crtc_state *old_crtc_state =
12017                 intel_atomic_get_old_crtc_state(state, crtc);
12018
12019         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12020 }
12021
/*
 * icl_add_sync_mode_crtcs - set up master/slave (port sync) CRTC links for
 * tiled displays on gen11+
 * @crtc_state: state of the (potential slave) crtc being checked
 *
 * If this crtc drives a non-master tile of a tiled display, find the crtc
 * driving the master tile, record its transcoder in
 * crtc_state->master_transcoder, and add this crtc's transcoder to the
 * master's sync_mode_slaves_mask.
 *
 * Returns 0 on success, -EINVAL if no master crtc can be found, or a
 * negative error code if acquiring connector/crtc states fails.
 */
static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
        struct drm_crtc *crtc = crtc_state->uapi.crtc;
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        struct drm_connector *master_connector, *connector;
        struct drm_connector_state *connector_state;
        struct drm_connector_list_iter conn_iter;
        struct drm_crtc *master_crtc = NULL;
        struct drm_crtc_state *master_crtc_state;
        struct intel_crtc_state *master_pipe_config;
        int i, tile_group_id;

        if (INTEL_GEN(dev_priv) < 11)
                return 0;

        /*
         * In case of tiled displays there could be one or more slaves but there is
         * only one master. Lets make the CRTC used by the connector corresponding
         * to the last horizonal and last vertical tile a master/genlock CRTC.
         * All the other CRTCs corresponding to other tiles of the same Tile group
         * are the slave CRTCs and hold a pointer to their genlock CRTC.
         */
        for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
                if (connector_state->crtc != crtc)
                        continue;
                if (!connector->has_tile)
                        continue;
                /* Mode doesn't span exactly one tile: no sync mode needed. */
                if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
                    crtc_state->hw.mode.vdisplay != connector->tile_v_size)
                        return 0;
                /* This connector is itself the master tile: nothing to link. */
                if (connector->tile_h_loc == connector->num_h_tile - 1 &&
                    connector->tile_v_loc == connector->num_v_tile - 1)
                        continue;
                crtc_state->sync_mode_slaves_mask = 0;
                tile_group_id = connector->tile_group->id;
                /* Scan all connectors for the master tile of this group. */
                drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
                drm_for_each_connector_iter(master_connector, &conn_iter) {
                        struct drm_connector_state *master_conn_state = NULL;

                        if (!master_connector->has_tile)
                                continue;
                        if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
                            master_connector->tile_v_loc != master_connector->num_v_tile - 1)
                                continue;
                        if (master_connector->tile_group->id != tile_group_id)
                                continue;

                        master_conn_state = drm_atomic_get_connector_state(&state->base,
                                                                           master_connector);
                        if (IS_ERR(master_conn_state)) {
                                /* End the iterator before bailing out. */
                                drm_connector_list_iter_end(&conn_iter);
                                return PTR_ERR(master_conn_state);
                        }
                        if (master_conn_state->crtc) {
                                master_crtc = master_conn_state->crtc;
                                break;
                        }
                }
                drm_connector_list_iter_end(&conn_iter);

                if (!master_crtc) {
                        DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
                                      connector_state->crtc->base.id);
                        return -EINVAL;
                }

                master_crtc_state = drm_atomic_get_crtc_state(&state->base,
                                                              master_crtc);
                if (IS_ERR(master_crtc_state))
                        return PTR_ERR(master_crtc_state);

                /* Link slave to master and vice versa. */
                master_pipe_config = to_intel_crtc_state(master_crtc_state);
                crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
                master_pipe_config->sync_mode_slaves_mask |=
                        BIT(crtc_state->cpu_transcoder);
                DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
                              transcoder_name(crtc_state->master_transcoder),
                              crtc_state->uapi.crtc->base.id,
                              master_pipe_config->sync_mode_slaves_mask);
        }

        return 0;
}
12106
/*
 * intel_crtc_atomic_check - per-crtc atomic check
 * @state: the full atomic state
 * @crtc: the crtc to validate
 *
 * Computes and validates derived crtc state: PLL clocks, color
 * management, pipe and intermediate watermarks, pipe scalers and IPS.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool mode_changed = needs_modeset(crtc_state);
        int ret;

        /* Pre-gen5 (except g4x): disabling the pipe requires a post-update wm pass. */
        if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
            mode_changed && !crtc_state->hw.active)
                crtc_state->update_wm_post = true;

        /* Compute new PLL state; a shared DPLL must not already be assigned. */
        if (mode_changed && crtc_state->hw.enable &&
            dev_priv->display.crtc_compute_clock &&
            !WARN_ON(crtc_state->shared_dpll)) {
                ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
                if (ret)
                        return ret;
        }

        /*
         * May need to update pipe gamma enable bits
         * when C8 planes are getting enabled/disabled.
         */
        if (c8_planes_changed(crtc_state))
                crtc_state->uapi.color_mgmt_changed = true;

        if (mode_changed || crtc_state->update_pipe ||
            crtc_state->uapi.color_mgmt_changed) {
                ret = intel_color_check(crtc_state);
                if (ret)
                        return ret;
        }

        ret = 0;
        if (dev_priv->display.compute_pipe_wm) {
                ret = dev_priv->display.compute_pipe_wm(crtc_state);
                if (ret) {
                        DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
                        return ret;
                }
        }

        if (dev_priv->display.compute_intermediate_wm) {
                /* Intermediate watermarks make no sense without pipe wm. */
                if (WARN_ON(!dev_priv->display.compute_pipe_wm))
                        return 0;

                /*
                 * Calculate 'intermediate' watermarks that satisfy both the
                 * old state and the new state.  We can program these
                 * immediately.
                 */
                ret = dev_priv->display.compute_intermediate_wm(crtc_state);
                if (ret) {
                        DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
                        return ret;
                }
        }

        /* skl+: update the pipe scaler and assign hw scalers to users. */
        if (INTEL_GEN(dev_priv) >= 9) {
                if (mode_changed || crtc_state->update_pipe)
                        ret = skl_update_scaler_crtc(crtc_state);
                if (!ret)
                        ret = intel_atomic_setup_scalers(dev_priv, crtc,
                                                         crtc_state);
        }

        if (HAS_IPS(dev_priv))
                crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);

        return ret;
}
12180
12181 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12182 {
12183         struct intel_connector *connector;
12184         struct drm_connector_list_iter conn_iter;
12185
12186         drm_connector_list_iter_begin(dev, &conn_iter);
12187         for_each_intel_connector_iter(connector, &conn_iter) {
12188                 if (connector->base.state->crtc)
12189                         drm_connector_put(&connector->base);
12190
12191                 if (connector->base.encoder) {
12192                         connector->base.state->best_encoder =
12193                                 connector->base.encoder;
12194                         connector->base.state->crtc =
12195                                 connector->base.encoder->crtc;
12196
12197                         drm_connector_get(&connector->base);
12198                 } else {
12199                         connector->base.state->best_encoder = NULL;
12200                         connector->base.state->crtc = NULL;
12201                 }
12202         }
12203         drm_connector_list_iter_end(&conn_iter);
12204 }
12205
12206 static int
12207 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12208                       struct intel_crtc_state *pipe_config)
12209 {
12210         struct drm_connector *connector = conn_state->connector;
12211         const struct drm_display_info *info = &connector->display_info;
12212         int bpp;
12213
12214         switch (conn_state->max_bpc) {
12215         case 6 ... 7:
12216                 bpp = 6 * 3;
12217                 break;
12218         case 8 ... 9:
12219                 bpp = 8 * 3;
12220                 break;
12221         case 10 ... 11:
12222                 bpp = 10 * 3;
12223                 break;
12224         case 12:
12225                 bpp = 12 * 3;
12226                 break;
12227         default:
12228                 return -EINVAL;
12229         }
12230
12231         if (bpp < pipe_config->pipe_bpp) {
12232                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12233                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12234                               connector->base.id, connector->name,
12235                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
12236                               pipe_config->pipe_bpp);
12237
12238                 pipe_config->pipe_bpp = bpp;
12239         }
12240
12241         return 0;
12242 }
12243
12244 static int
12245 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12246                           struct intel_crtc_state *pipe_config)
12247 {
12248         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12249         struct drm_atomic_state *state = pipe_config->uapi.state;
12250         struct drm_connector *connector;
12251         struct drm_connector_state *connector_state;
12252         int bpp, i;
12253
12254         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12255             IS_CHERRYVIEW(dev_priv)))
12256                 bpp = 10*3;
12257         else if (INTEL_GEN(dev_priv) >= 5)
12258                 bpp = 12*3;
12259         else
12260                 bpp = 8*3;
12261
12262         pipe_config->pipe_bpp = bpp;
12263
12264         /* Clamp display bpp to connector max bpp */
12265         for_each_new_connector_in_state(state, connector, connector_state, i) {
12266                 int ret;
12267
12268                 if (connector_state->crtc != &crtc->base)
12269                         continue;
12270
12271                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12272                 if (ret)
12273                         return ret;
12274         }
12275
12276         return 0;
12277 }
12278
/* Log the hardware ("crtc_"-prefixed) timings of @mode at KMS debug level. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
12290
/*
 * Log one set of link M/N values plus the lane count at KMS debug level.
 * @id names the link (e.g. "fdi", "dp m_n", "dp m2_n2").
 * Note: @pipe_config is currently unused by the body.
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
12301
/*
 * Dump an HDMI infoframe to the kernel log, but only when KMS debugging
 * is enabled — the early return skips the formatting cost otherwise.
 */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
12311
/* Human-readable names for INTEL_OUTPUT_* values, indexed by output type. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12330
12331 static void snprintf_output_types(char *buf, size_t len,
12332                                   unsigned int output_types)
12333 {
12334         char *str = buf;
12335         int i;
12336
12337         str[0] = '\0';
12338
12339         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12340                 int r;
12341
12342                 if ((output_types & BIT(i)) == 0)
12343                         continue;
12344
12345                 r = snprintf(str, len, "%s%s",
12346                              str != buf ? "," : "", output_type_str[i]);
12347                 if (r >= len)
12348                         break;
12349                 str += r;
12350                 len -= r;
12351
12352                 output_types &= ~BIT(i);
12353         }
12354
12355         WARN_ON_ONCE(output_types != 0);
12356 }
12357
/* Human-readable names for enum intel_output_format values. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12364
12365 static const char *output_formats(enum intel_output_format format)
12366 {
12367         if (format >= ARRAY_SIZE(output_format_str))
12368                 format = INTEL_OUTPUT_FORMAT_INVALID;
12369         return output_format_str[format];
12370 }
12371
/*
 * Log a plane's framebuffer, format, rotation/scaler assignment and
 * (if visible) its src/dst rectangles at KMS debug level.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* No fb attached: report just the visibility and bail. */
	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->uapi.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->uapi.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
			      DRM_RECT_ARG(&plane_state->uapi.dst));
}
12397
/*
 * Dump the full crtc state at KMS debug level: output types/format,
 * transcoder, bpp, link M/N values, infoframes, modes/timings, scaler
 * and panel-fitter setup, DPLL state, color management registers, and
 * finally every plane on this pipe (when @state is provided).
 *
 * @context is a free-form string identifying the caller/reason.
 * A disabled crtc only dumps the enable flag and the planes.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->hw.enable), context);

	/* Nothing else is meaningful while the crtc is disabled. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->hw.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 only exists for the DRRS downclocked link. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump each infoframe type only if its enable bit is set. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms use the gmch pfit; everything else the pch pfit. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has a CGM unit instead of the CSC mode register. */
	if (IS_CHERRYVIEW(dev_priv))
		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->cgm_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->csc_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
12508
/*
 * Verify that no digital port is driven by more than one encoder, and
 * that MST and SST/HDMI are not mixed on the same port.  Returns true
 * when the configuration is consistent.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state from @state, else the current state. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
12580
/*
 * Sync the parts of the uapi state that may change without a full
 * modeset into the hw state (currently just the color blobs; see
 * intel_crtc_copy_color_blobs()).
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
12586
/*
 * Copy the full uapi (userspace-visible) crtc state into the hw state
 * that the hardware programming paths consume: enable/active flags,
 * both modes, plus the nomodeset parts.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
12596
/*
 * Propagate the hw state back into the uapi state so userspace sees
 * what was actually programmed.  The mode copy goes through
 * drm_atomic_set_mode_for_crtc() (which manages the mode blob); a
 * failure there is unexpected, hence the WARN_ON.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
12613
/*
 * Clear @crtc_state before recomputing it, while preserving the fields
 * that must survive: the uapi state, DPLL selection, scaler state,
 * watermarks on G4X/VLV/CHV, CRC enable and the port-sync slave mask.
 * The hw state is then rebuilt from the (preserved) uapi state.
 *
 * Returns 0 on success or -ENOMEM if the scratch allocation fails.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc_state *saved_state;

	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call.
	 */
	if (is_trans_port_sync_master(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	/* Overwrite crtc_state wholesale with the mostly-zeroed copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
12658
/*
 * Compute the full pipe configuration: sanitize sync flags, derive the
 * baseline bpp and pipe source size, collect the output types, then run
 * every encoder's ->compute_config() hook followed by the CRTC fixup.
 * The encoder/CRTC pass is retried once if the CRTC code returns RETRY.
 *
 * Returns 0 on success, -EDEADLK for modeset-lock backoff, or another
 * negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;

	/* Default transcoder: the one matching the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the pre-encoder bpp for the debug message at the end. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Set the crtc_state defaults for trans_port_sync */
	pipe_config->master_transcoder = INVALID_TRANSCODER;
	ret = icl_add_sync_mode_crtcs(pipe_config);
	if (ret) {
		DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
			      ret);
		return ret;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is normal lock-backoff, don't spam the log. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* One retry is allowed when the CRTC asks to recompute with new bw. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
12806
/*
 * Compare two clocks with ~5% tolerance: true when the relative
 * difference 2*|c1-c2|/(c1+c2) is below 5%.  A zero clock only
 * matches another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	/* (delta + sum) * 100 / sum < 105  <=>  delta/sum < 5% */
	return (delta + clock1 + clock2) * 100 / (clock1 + clock2) < 105;
}
12824
/*
 * Compare two M/N ratio pairs.  In @exact mode only identical values
 * match.  Otherwise the pair with the smaller N is scaled up by powers
 * of two until the N values meet; if they land on the same N, the M
 * values are compared fuzzily (~5%, via intel_fuzzy_clock_check()).
 * Zero in any component never matches non-exact.
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* Values fit in int, so the doublings below cannot overflow int range checks. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	/* N values that are not power-of-two multiples of each other: no match. */
	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}
12855
12856 static bool
12857 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12858                        const struct intel_link_m_n *m2_n2,
12859                        bool exact)
12860 {
12861         return m_n->tu == m2_n2->tu &&
12862                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12863                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12864                 intel_compare_m_n(m_n->link_m, m_n->link_n,
12865                                   m2_n2->link_m, m2_n2->link_n, exact);
12866 }
12867
12868 static bool
12869 intel_compare_infoframe(const union hdmi_infoframe *a,
12870                         const union hdmi_infoframe *b)
12871 {
12872         return memcmp(a, b, sizeof(*a)) == 0;
12873 }
12874
/*
 * Report an infoframe mismatch between the expected (@a) and found (@b)
 * state.  During a fastset check this is a debug message (skipped
 * entirely when KMS debugging is off); otherwise it is an error.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		if ((drm_debug & DRM_UT_KMS) == 0)
			return;

		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
		DRM_DEBUG_KMS("expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		DRM_DEBUG_KMS("found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		DRM_ERROR("mismatch in %s infoframe\n", name);
		DRM_ERROR("expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		DRM_ERROR("found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
12898
/*
 * Report a crtc state mismatch for field @name with a printf-style
 * detail message: debug level during fastset checks, error otherwise.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			      crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
			  crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
12919
12920 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12921 {
12922         if (i915_modparams.fastboot != -1)
12923                 return i915_modparams.fastboot;
12924
12925         /* Enable fastboot by default on Skylake and newer */
12926         if (INTEL_GEN(dev_priv) >= 9)
12927                 return true;
12928
12929         /* Enable fastboot by default on VLV and CHV */
12930         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12931                 return true;
12932
12933         /* Disabled by default on all others */
12934         return false;
12935 }
12936
/*
 * Compare the software-tracked CRTC state (@current_config) against the
 * state read back from hardware (@pipe_config), one field at a time.
 *
 * @fastset selects the "can we avoid a full modeset" comparison: several
 * fields are skipped or compared more loosely in that mode (see the
 * !fastset conditions below), and every mismatch is reported through
 * pipe_config_mismatch() with the fastset flag.
 *
 * Returns true iff every checked field matches.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	u32 bp_gamma = 0;
	/*
	 * True when fastsetting on top of a state inherited from the
	 * BIOS/boot firmware: I915_MODE_FLAG_INHERITED is set on the old
	 * state but not on the new one.
	 */
	bool fixup_inherited = fastset &&
		(current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
		!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);

	/* Without fastboot an inherited state always forces a full modeset. */
	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
		ret = false;
	}

/* Compare one field, reporting mismatches in hex. */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare one field, reporting mismatches in decimal. */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare one boolean field, reporting mismatches as yes/no. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
				     "(expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* Compare one pointer field (e.g. the shared DPLL). */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/*
 * Compare a link M/N value set; comparison is exact only when !fastset
 * (see intel_compare_link_m_n()'s third argument).
 */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.gmch_m, \
				     current_config->alt_name.gmch_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* Compare only the bits selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Compare a clock value within intel_fuzzy_clock_check() tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare one full infoframe payload. */
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/*
 * Compare a LUT: first the mode field (@name1), then, only if the modes
 * match, the LUT contents (@name2) at @bit_precision.
 */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				"(expected %i, found %i, won't compare lut values)", \
				current_config->name1, \
				pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					pipe_config->name2, pipe_config->name1, \
					bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					"hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)

/* True when either state carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	/* On BDW+ the hw only has one M/N register pair (see the comment
	 * on PIPE_CONF_CHECK_M_N_ALT above). */
	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* Adjusted-mode transcoder timings, horizontal then vertical. */
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_I(dc3co_exitline);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	/* Only the audio enable bit is read back, not the full ELD. */
	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	/* These fields are allowed to change across a fastset. */
	if (!fastset) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);

		/* Only compare LUT contents when readout has a known precision. */
		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
		if (bp_gamma)
			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);

	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	/* Shared DPLL assignment and the full per-platform PLL register set. */
	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	/* Infoframe enable mask, GCP, and the individual payloads. */
	PIPE_CONF_CHECK_X(infoframes.enable);
	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);

	/* Transcoder port sync / tiled display configuration. */
	PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

	return ret;
}
13287
13288 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13289                                            const struct intel_crtc_state *pipe_config)
13290 {
13291         if (pipe_config->has_pch_encoder) {
13292                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13293                                                             &pipe_config->fdi_m_n);
13294                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13295
13296                 /*
13297                  * FDI already provided one idea for the dotclock.
13298                  * Yell if the encoder disagrees.
13299                  */
13300                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13301                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13302                      fdi_dotclock, dotclock);
13303         }
13304 }
13305
/*
 * Read the SKL+ watermark and DDB allocation state back from the hardware
 * and compare it against the software state in @new_crtc_state, reporting
 * every mismatch with DRM_ERROR.  No-op on pre-gen9 or on inactive pipes.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Heap-allocated scratch for the hw readout (kzalloc'd below). */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_ddb_allocation ddb;
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_ddb_allocation *sw_ddb;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	const enum pipe pipe = crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	/* Hardware watermarks vs. the optimal sw watermarks. */
	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
	sw_wm = &new_crtc_state->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* Gen11+ additionally tracks how many DBUF slices are enabled. */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
			  sw_ddb->enabled_slices,
			  hw->ddb.enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	/*
	 * NOTE(review): the condition below is unconditionally true, so the
	 * skip described above never happens here — presumably a leftover
	 * from an earlier "cursor active" test; confirm before relying on
	 * the comment.
	 */
	if (1) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
13444
13445 static void
13446 verify_connector_state(struct intel_atomic_state *state,
13447                        struct intel_crtc *crtc)
13448 {
13449         struct drm_connector *connector;
13450         struct drm_connector_state *new_conn_state;
13451         int i;
13452
13453         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13454                 struct drm_encoder *encoder = connector->encoder;
13455                 struct intel_crtc_state *crtc_state = NULL;
13456
13457                 if (new_conn_state->crtc != &crtc->base)
13458                         continue;
13459
13460                 if (crtc)
13461                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13462
13463                 intel_connector_verify_state(crtc_state, new_conn_state);
13464
13465                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13466                      "connector's atomic encoder doesn't match legacy encoder\n");
13467         }
13468 }
13469
13470 static void
13471 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
13472 {
13473         struct intel_encoder *encoder;
13474         struct drm_connector *connector;
13475         struct drm_connector_state *old_conn_state, *new_conn_state;
13476         int i;
13477
13478         for_each_intel_encoder(&dev_priv->drm, encoder) {
13479                 bool enabled = false, found = false;
13480                 enum pipe pipe;
13481
13482                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
13483                               encoder->base.base.id,
13484                               encoder->base.name);
13485
13486                 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
13487                                                    new_conn_state, i) {
13488                         if (old_conn_state->best_encoder == &encoder->base)
13489                                 found = true;
13490
13491                         if (new_conn_state->best_encoder != &encoder->base)
13492                                 continue;
13493                         found = enabled = true;
13494
13495                         I915_STATE_WARN(new_conn_state->crtc !=
13496                                         encoder->base.crtc,
13497                              "connector's crtc doesn't match encoder crtc\n");
13498                 }
13499
13500                 if (!found)
13501                         continue;
13502
13503                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
13504                      "encoder's enabled state mismatch "
13505                      "(expected %i, found %i)\n",
13506                      !!encoder->base.crtc, enabled);
13507
13508                 if (!encoder->base.crtc) {
13509                         bool active;
13510
13511                         active = encoder->get_hw_state(encoder, &pipe);
13512                         I915_STATE_WARN(active,
13513                              "encoder detached but still enabled on pipe %c.\n",
13514                              pipe_name(pipe));
13515                 }
13516         }
13517 }
13518
/*
 * Read the full pipe configuration back from the hardware and compare it
 * against the committed software state for @crtc.
 *
 * NOTE: @old_crtc_state is destroyed and its storage is reused in place
 * as scratch space for the hardware readout (pipe_config); callers must
 * not rely on its contents afterwards.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	bool active;

	/* Tear down the old state but preserve its ->uapi.state pointer. */
	state = old_crtc_state->uapi.state;
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);

	/* Reuse the old state's storage for the hardware readout. */
	pipe_config = old_crtc_state;
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->uapi.crtc = &crtc->base;
	pipe_config->uapi.state = state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Each encoder on the crtc must agree about the active state. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let active encoders fill in their part of the readout. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Inactive pipes are not compared field by field. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Full (non-fastset) comparison of sw state vs. hw readout. */
	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
13589
13590 static void
13591 intel_verify_planes(struct intel_atomic_state *state)
13592 {
13593         struct intel_plane *plane;
13594         const struct intel_plane_state *plane_state;
13595         int i;
13596
13597         for_each_new_intel_plane_in_state(state, plane,
13598                                           plane_state, i)
13599                 assert_plane(plane, plane_state->planar_slave ||
13600                              plane_state->uapi.visible);
13601 }
13602
13603 static void
13604 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13605                          struct intel_shared_dpll *pll,
13606                          struct intel_crtc *crtc,
13607                          struct intel_crtc_state *new_crtc_state)
13608 {
13609         struct intel_dpll_hw_state dpll_hw_state;
13610         unsigned int crtc_mask;
13611         bool active;
13612
13613         memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13614
13615         DRM_DEBUG_KMS("%s\n", pll->info->name);
13616
13617         active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
13618
13619         if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
13620                 I915_STATE_WARN(!pll->on && pll->active_mask,
13621                      "pll in active use but not on in sw tracking\n");
13622                 I915_STATE_WARN(pll->on && !pll->active_mask,
13623                      "pll is on but not used by any active crtc\n");
13624                 I915_STATE_WARN(pll->on != active,
13625                      "pll on state mismatch (expected %i, found %i)\n",
13626                      pll->on, active);
13627         }
13628
13629         if (!crtc) {
13630                 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
13631                                 "more active pll users than references: %x vs %x\n",
13632                                 pll->active_mask, pll->state.crtc_mask);
13633
13634                 return;
13635         }
13636
13637         crtc_mask = drm_crtc_mask(&crtc->base);
13638
13639         if (new_crtc_state->hw.active)
13640                 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13641                                 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13642                                 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13643         else
13644                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13645                                 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13646                                 pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13647
13648         I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
13649                         "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13650                         crtc_mask, pll->state.crtc_mask);
13651
13652         I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
13653                                           &dpll_hw_state,
13654                                           sizeof(dpll_hw_state)),
13655                         "pll hw state mismatch\n");
13656 }
13657
13658 static void
13659 verify_shared_dpll_state(struct intel_crtc *crtc,
13660                          struct intel_crtc_state *old_crtc_state,
13661                          struct intel_crtc_state *new_crtc_state)
13662 {
13663         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13664
13665         if (new_crtc_state->shared_dpll)
13666                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13667
13668         if (old_crtc_state->shared_dpll &&
13669             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13670                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13671                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13672
13673                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13674                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13675                                 pipe_name(drm_crtc_index(&crtc->base)));
13676                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13677                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13678                                 pipe_name(drm_crtc_index(&crtc->base)));
13679         }
13680 }
13681
13682 static void
13683 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13684                           struct intel_atomic_state *state,
13685                           struct intel_crtc_state *old_crtc_state,
13686                           struct intel_crtc_state *new_crtc_state)
13687 {
13688         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13689                 return;
13690
13691         verify_wm_state(crtc, new_crtc_state);
13692         verify_connector_state(state, crtc);
13693         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13694         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13695 }
13696
13697 static void
13698 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13699 {
13700         int i;
13701
13702         for (i = 0; i < dev_priv->num_shared_dpll; i++)
13703                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13704 }
13705
/*
 * Verify the parts of the hw/sw state that are not tied to any specific
 * crtc: all encoders, connectors with no attached crtc, and the shared
 * DPLL reference counts.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
13714
13715 static void
13716 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
13717 {
13718         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13719         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13720         const struct drm_display_mode *adjusted_mode =
13721                 &crtc_state->hw.adjusted_mode;
13722
13723         drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
13724
13725         /*
13726          * The scanline counter increments at the leading edge of hsync.
13727          *
13728          * On most platforms it starts counting from vtotal-1 on the
13729          * first active line. That means the scanline counter value is
13730          * always one less than what we would expect. Ie. just after
13731          * start of vblank, which also occurs at start of hsync (on the
13732          * last active line), the scanline counter will read vblank_start-1.
13733          *
13734          * On gen2 the scanline counter starts counting from 1 instead
13735          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13736          * to keep the value positive), instead of adding one.
13737          *
13738          * On HSW+ the behaviour of the scanline counter depends on the output
13739          * type. For DP ports it behaves like most other platforms, but on HDMI
13740          * there's an extra 1 line difference. So we need to add two instead of
13741          * one to the value.
13742          *
13743          * On VLV/CHV DSI the scanline counter would appear to increment
13744          * approx. 1/3 of a scanline before start of vblank. Unfortunately
13745          * that means we can't tell whether we're in vblank or not while
13746          * we're on that particular line. We must still set scanline_offset
13747          * to 1 so that the vblank timestamps come out correct when we query
13748          * the scanline counter from within the vblank interrupt handler.
13749          * However if queried just before the start of vblank we'll get an
13750          * answer that's slightly in the future.
13751          */
13752         if (IS_GEN(dev_priv, 2)) {
13753                 int vtotal;
13754
13755                 vtotal = adjusted_mode->crtc_vtotal;
13756                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13757                         vtotal /= 2;
13758
13759                 crtc->scanline_offset = vtotal - 1;
13760         } else if (HAS_DDI(dev_priv) &&
13761                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13762                 crtc->scanline_offset = 2;
13763         } else {
13764                 crtc->scanline_offset = 1;
13765         }
13766 }
13767
13768 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13769 {
13770         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13771         struct intel_crtc_state *new_crtc_state;
13772         struct intel_crtc *crtc;
13773         int i;
13774
13775         if (!dev_priv->display.crtc_compute_clock)
13776                 return;
13777
13778         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13779                 if (!needs_modeset(new_crtc_state))
13780                         continue;
13781
13782                 intel_release_shared_dplls(state, crtc);
13783         }
13784 }
13785
13786 /*
13787  * This implements the workaround described in the "notes" section of the mode
13788  * set sequence documentation. When going from no pipes or single pipe to
13789  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13790  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13791  */
/*
 * Find the (at most one) pipe whose vblanks must be waited on before
 * enabling planes on another pipe, and record it in hsw_workaround_pipe.
 * Returns 0 on success or a negative error code from state acquisition.
 */
static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* Second enabling crtc found; two is all we need. */
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* Pull every crtc into the state so we can inspect it. */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Exactly one pipe stays enabled: the first newly-enabled crtc must
	 * wait on it. Otherwise (no pipe stays enabled) the second
	 * newly-enabled crtc, if any, waits on the first one.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13846
/*
 * Global checks run when at least one crtc needs a full modeset:
 * recompute the active-pipe bookkeeping, take the global state lock if
 * the set of active pipes changes, recalculate cdclk, release stale
 * shared DPLLs, and apply the HSW plane workaround.
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	state->modeset = true;
	state->active_pipes = dev_priv->active_pipes;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;

	/* Recompute which pipes will be active and which ones change. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->hw.active)
			state->active_pipes |= BIT(crtc->pipe);
		else
			state->active_pipes &= ~BIT(crtc->pipe);

		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
			state->active_pipe_changes |= BIT(crtc->pipe);
	}

	/* A changed active-pipe set affects device-wide state; lock it. */
	if (state->active_pipe_changes) {
		ret = intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13891
13892 /*
13893  * Handle calculation of various watermark data at the end of the atomic check
13894  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13895  * handlers to ensure that all derived state has been updated.
13896  */
13897 static int calc_watermark_data(struct intel_atomic_state *state)
13898 {
13899         struct drm_device *dev = state->base.dev;
13900         struct drm_i915_private *dev_priv = to_i915(dev);
13901
13902         /* Is there platform-specific watermark information to calculate? */
13903         if (dev_priv->display.compute_global_watermarks)
13904                 return dev_priv->display.compute_global_watermarks(state);
13905
13906         return 0;
13907 }
13908
13909 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
13910                                      struct intel_crtc_state *new_crtc_state)
13911 {
13912         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
13913                 return;
13914
13915         new_crtc_state->uapi.mode_changed = false;
13916         new_crtc_state->update_pipe = true;
13917
13918         /*
13919          * If we're not doing the full modeset we want to
13920          * keep the current M/N values as they may be
13921          * sufficiently different to the computed values
13922          * to cause problems.
13923          *
13924          * FIXME: should really copy more fuzzy state here
13925          */
13926         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
13927         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
13928         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
13929         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
13930 }
13931
13932 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
13933                                           struct intel_crtc *crtc,
13934                                           u8 plane_ids_mask)
13935 {
13936         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13937         struct intel_plane *plane;
13938
13939         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
13940                 struct intel_plane_state *plane_state;
13941
13942                 if ((plane_ids_mask & BIT(plane->id)) == 0)
13943                         continue;
13944
13945                 plane_state = intel_atomic_get_plane_state(state, plane);
13946                 if (IS_ERR(plane_state))
13947                         return PTR_ERR(plane_state);
13948         }
13949
13950         return 0;
13951 }
13952
13953 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
13954 {
13955         /* See {hsw,vlv,ivb}_plane_ratio() */
13956         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
13957                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
13958                 IS_IVYBRIDGE(dev_priv);
13959 }
13960
/*
 * Run the per-plane atomic checks, then on platforms where the number
 * of active planes affects the planes' minimum cdclk, pull the affected
 * planes into the state and recompute each plane's minimum cdclk.
 * *need_modeset is OR'ed with true if a cdclk change forces a modeset.
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_modeset)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane doesn't count towards the plane ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);

	return 0;
}
14021
14022 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14023 {
14024         struct intel_crtc_state *crtc_state;
14025         struct intel_crtc *crtc;
14026         int i;
14027
14028         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14029                 int ret = intel_crtc_atomic_check(state, crtc);
14030                 if (ret) {
14031                         DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
14032                                          crtc->base.base.id, crtc->base.name);
14033                         return ret;
14034                 }
14035         }
14036
14037         return 0;
14038 }
14039
14040 /**
14041  * intel_atomic_check - validate state object
14042  * @dev: drm device
14043  * @_state: state to validate
14044  */
14045 static int intel_atomic_check(struct drm_device *dev,
14046                               struct drm_atomic_state *_state)
14047 {
14048         struct drm_i915_private *dev_priv = to_i915(dev);
14049         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14050         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14051         struct intel_crtc *crtc;
14052         int ret, i;
14053         bool any_ms = false;
14054
14055         /* Catch I915_MODE_FLAG_INHERITED */
14056         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14057                                             new_crtc_state, i) {
14058                 if (new_crtc_state->hw.mode.private_flags !=
14059                     old_crtc_state->hw.mode.private_flags)
14060                         new_crtc_state->uapi.mode_changed = true;
14061         }
14062
14063         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14064         if (ret)
14065                 goto fail;
14066
14067         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14068                                             new_crtc_state, i) {
14069                 if (!needs_modeset(new_crtc_state)) {
14070                         /* Light copy */
14071                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14072
14073                         continue;
14074                 }
14075
14076                 if (!new_crtc_state->uapi.enable) {
14077                         intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
14078
14079                         any_ms = true;
14080                         continue;
14081                 }
14082
14083                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14084                 if (ret)
14085                         goto fail;
14086
14087                 ret = intel_modeset_pipe_config(new_crtc_state);
14088                 if (ret)
14089                         goto fail;
14090
14091                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14092
14093                 if (needs_modeset(new_crtc_state))
14094                         any_ms = true;
14095         }
14096
14097         if (any_ms && !check_digital_port_conflicts(state)) {
14098                 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
14099                 ret = EINVAL;
14100                 goto fail;
14101         }
14102
14103         ret = drm_dp_mst_atomic_check(&state->base);
14104         if (ret)
14105                 goto fail;
14106
14107         any_ms |= state->cdclk.force_min_cdclk_changed;
14108
14109         ret = intel_atomic_check_planes(state, &any_ms);
14110         if (ret)
14111                 goto fail;
14112
14113         if (any_ms) {
14114                 ret = intel_modeset_checks(state);
14115                 if (ret)
14116                         goto fail;
14117         } else {
14118                 state->cdclk.logical = dev_priv->cdclk.logical;
14119         }
14120
14121         ret = intel_atomic_check_crtcs(state);
14122         if (ret)
14123                 goto fail;
14124
14125         intel_fbc_choose_crtc(dev_priv, state);
14126         ret = calc_watermark_data(state);
14127         if (ret)
14128                 goto fail;
14129
14130         ret = intel_bw_atomic_check(state);
14131         if (ret)
14132                 goto fail;
14133
14134         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14135                                             new_crtc_state, i) {
14136                 if (!needs_modeset(new_crtc_state) &&
14137                     !new_crtc_state->update_pipe)
14138                         continue;
14139
14140                 intel_dump_pipe_config(new_crtc_state, state,
14141                                        needs_modeset(new_crtc_state) ?
14142                                        "[modeset]" : "[fastset]");
14143         }
14144
14145         return 0;
14146
14147  fail:
14148         if (ret == -EDEADLK)
14149                 return ret;
14150
14151         /*
14152          * FIXME would probably be nice to know which crtc specifically
14153          * caused the failure, in cases where we can pinpoint it.
14154          */
14155         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14156                                             new_crtc_state, i)
14157                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14158
14159         return ret;
14160 }
14161
14162 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
14163 {
14164         return drm_atomic_helper_prepare_planes(state->base.dev,
14165                                                 &state->base);
14166 }
14167
14168 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14169 {
14170         struct drm_device *dev = crtc->base.dev;
14171         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14172
14173         if (!vblank->max_vblank_count)
14174                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14175
14176         return crtc->base.funcs->get_vblank_counter(&crtc->base);
14177 }
14178
/*
 * Enable FIFO underrun reporting for @crtc: CPU side (except on gen2,
 * which is skipped here) and, for PCH-attached outputs, the PCH side.
 */
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}
14194
/*
 * Reprogram the pipe source size, panel fitter and related state for a
 * fastset (no full modeset). Called under vblank evasion from
 * commit_pipe_config().
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Pre-gen9 PCH platforms: enable or disable the pfit. */
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
14227
/*
 * Commit the remaining pipe configuration and update watermarks.
 * Called under vblank evasion from intel_update_crtc().
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc_state *old_crtc_state,
			       struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	/* Watermarks are updated for both modesets and fastsets. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state,
							   new_crtc_state);
}
14258
/*
 * Commit the new state of @crtc: enable it (modeset) or update it
 * (fastset), then commit the pipe config and plane updates under
 * vblank evasion.
 */
static void intel_update_crtc(struct intel_crtc *crtc,
			      struct intel_atomic_state *state,
			      struct intel_crtc_state *old_crtc_state,
			      struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));

	if (modeset) {
		intel_crtc_update_active_timings(new_crtc_state);

		dev_priv->display.crtc_enable(new_crtc_state, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(crtc);
	} else {
		intel_pre_plane_update(old_crtc_state, new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(crtc, new_crtc_state, state);
	}

	/* FBC must be disabled if the fastset no longer supports it. */
	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, old_crtc_state, new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14311
/*
 * Look up the slave crtc of a port-sync master from its
 * sync_mode_slaves_mask (which must contain exactly one bit).
 */
static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
	enum transcoder slave_transcoder;

	WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));

	slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
	/*
	 * NOTE(review): relies on transcoder and pipe enums lining up for
	 * the transcoders that can be port-sync slaves — confirm this holds
	 * on all platforms that set sync_mode_slaves_mask.
	 */
	return intel_get_crtc_for_pipe(dev_priv,
				       (enum pipe)slave_transcoder);
}
14323
/*
 * Fully disable @crtc per its old state: planes first, then pipe CRC,
 * the crtc itself, FBC and its shared DPLL; finally check for underruns
 * and reprogram watermarks for the disabled state.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(old_crtc_state, state);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/*
	 * Underruns don't always raise interrupts,
	 * so check manually.
	 */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state,
						     new_crtc_state);
}
14358
/*
 * Disable a transcoder port sync master CRTC together with its slave.
 * The slave goes down first: slave vblanks are gated on the master's,
 * so the master must remain alive until the slave is off.
 */
static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
						   struct intel_crtc *crtc,
						   struct intel_crtc_state *old_crtc_state,
						   struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
	struct intel_crtc_state *new_slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc_state *old_slave_crtc_state =
		intel_atomic_get_old_crtc_state(state, slave_crtc);

	/* The slave is expected to be part of the same atomic state. */
	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
		!old_slave_crtc_state);

	/* Disable Slave first */
	intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
	if (old_slave_crtc_state->hw.active)
		intel_old_crtc_state_disables(state,
					      old_slave_crtc_state,
					      new_slave_crtc_state,
					      slave_crtc);

	/* Disable Master */
	intel_pre_plane_update(old_crtc_state, new_crtc_state);
	if (old_crtc_state->hw.active)
		intel_old_crtc_state_disables(state,
					      old_crtc_state,
					      new_crtc_state,
					      crtc);
}
14389
/*
 * Run the disable half of an atomic commit: pre-plane updates and full
 * CRTC teardown for every pipe undergoing a modeset.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Disable CRTC/pipes in reverse order because some features (MST in
	 * TGL+) require a master and slave relationship between pipes, so it
	 * should always pick the lowest pipe as master as it will be enabled
	 * first and disabled in the reverse order so the master will be the
	 * last one to be disabled.
	 */
	for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (is_trans_port_sync_mode(new_crtc_state)) {
			if (is_trans_port_sync_master(new_crtc_state))
				intel_trans_port_sync_modeset_disables(state,
								       crtc,
								       old_crtc_state,
								       new_crtc_state);
			else
				/* Slaves are torn down with their master. */
				continue;
		} else {
			intel_pre_plane_update(old_crtc_state, new_crtc_state);

			if (old_crtc_state->hw.active)
				intel_old_crtc_state_disables(state,
							      old_crtc_state,
							      new_crtc_state,
							      crtc);
		}
	}
}
14432
14433 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14434 {
14435         struct intel_crtc *crtc;
14436         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14437         int i;
14438
14439         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14440                 if (!new_crtc_state->hw.active)
14441                         continue;
14442
14443                 intel_update_crtc(crtc, state, old_crtc_state,
14444                                   new_crtc_state);
14445         }
14446 }
14447
/*
 * First half of the port sync enable sequence for one CRTC: program the
 * active timings, run the platform crtc_enable hook, and re-arm pipe CRC
 * collection.  The switch of DP_TP_CTL to Normal is done separately by
 * the caller (see intel_update_trans_port_sync_crtcs()).
 */
static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
					      struct intel_atomic_state *state,
					      struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_update_active_timings(new_crtc_state);
	dev_priv->display.crtc_enable(new_crtc_state, state);
	intel_crtc_enable_pipe_crc(crtc);
}
14458
14459 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
14460                                        struct intel_atomic_state *state)
14461 {
14462         struct drm_connector *uninitialized_var(conn);
14463         struct drm_connector_state *conn_state;
14464         struct intel_dp *intel_dp;
14465         int i;
14466
14467         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
14468                 if (conn_state->crtc == &crtc->base)
14469                         break;
14470         }
14471         intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
14472         intel_dp_stop_link_train(intel_dp);
14473 }
14474
/*
 * Post-enable plane/pipe programming for a port sync CRTC: (re)configure
 * FBC, commit the pipe config and plane updates under vblank evasion,
 * and arm FIFO underrun reporting for BIOS-inherited fastsets.
 */
static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
					   struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));
	bool modeset = needs_modeset(new_crtc_state);

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);
	commit_pipe_config(state, old_crtc_state, new_crtc_state);
	skl_update_planes_on_crtc(state, crtc);
	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14508
/*
 * Full enable sequence for a transcoder port sync master and its slave:
 * both pipes come up with DP_TP_CTL left in Idle, the slave's DP_TP_CTL
 * is switched to Normal first, then the master's, and finally the
 * post-enable plane programming runs for both pipes.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
					       struct intel_atomic_state *state,
					       struct intel_crtc_state *old_crtc_state,
					       struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
	struct intel_crtc_state *new_slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc_state *old_slave_crtc_state =
		intel_atomic_get_old_crtc_state(state, slave_crtc);

	/* The slave is expected to be part of the same atomic state. */
	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
		!old_slave_crtc_state);

	DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
		      crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
		      slave_crtc->base.name);

	/* Enable seq for slave with DP_TP_CTL left Idle until the
	 * master is ready
	 */
	intel_crtc_enable_trans_port_sync(slave_crtc,
					  state,
					  new_slave_crtc_state);

	/* Enable seq for master with DP_TP_CTL left Idle */
	intel_crtc_enable_trans_port_sync(crtc,
					  state,
					  new_crtc_state);

	/* Set Slave's DP_TP_CTL to Normal */
	intel_set_dp_tp_ctl_normal(slave_crtc,
				   state);

	/* Set Master's DP_TP_CTL To Normal */
	usleep_range(200, 400);
	intel_set_dp_tp_ctl_normal(crtc,
				   state);

	/* Now do the post crtc enable for all master and slaves */
	intel_post_crtc_enable_updates(slave_crtc,
				       state);
	intel_post_crtc_enable_updates(crtc,
				       state);
}
14554
/*
 * SKL+ variant of the commit-enables step.  Because pipes share the DDB,
 * pipes must be updated in an order that guarantees no pipe's new DDB
 * allocation ever overlaps another pipe's current one; the outer loop
 * iterates until every active pipe has been committed.  Also manages
 * enabling/disabling the second DBuf slice on ICL+.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned int updated = 0;	/* bitmask of pipes already committed */
	bool progress;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = state->wm_results.ddb.enabled_slices;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->hw.active)
			entries[i] = old_crtc_state->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;
			bool vbl_wait = false;
			bool modeset = needs_modeset(new_crtc_state);

			if (updated & BIT(crtc->pipe) || !new_crtc_state->hw.active)
				continue;

			/* Not safe yet — try again on a later pass. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries,
							INTEL_NUM_PIPES(dev_priv), i))
				continue;

			updated |= BIT(pipe);
			entries[i] = new_crtc_state->wm.skl.ddb;

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    !modeset &&
			    state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			/* Port sync slaves are enabled with their master. */
			if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
				if (is_trans_port_sync_master(new_crtc_state))
					intel_update_trans_port_sync_crtcs(crtc,
									   state,
									   old_crtc_state,
									   new_crtc_state);
				else
					continue;
			} else {
				intel_update_crtc(crtc, state, old_crtc_state,
						  new_crtc_state);
			}

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
14637
14638 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
14639 {
14640         struct intel_atomic_state *state, *next;
14641         struct llist_node *freed;
14642
14643         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
14644         llist_for_each_entry_safe(state, next, freed, freed)
14645                 drm_atomic_state_put(&state->base);
14646 }
14647
14648 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
14649 {
14650         struct drm_i915_private *dev_priv =
14651                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
14652
14653         intel_atomic_helper_free_state(dev_priv);
14654 }
14655
/*
 * Wait for the commit's dependency fence before touching the hardware.
 * We cannot block on the fence alone: a GPU reset under modeset needs
 * the display code to make progress, so we also queue on the
 * I915_RESET_MODESET bit waitqueue and stop waiting as soon as either
 * the fence signals or that reset is flagged.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue on both waitqueues so either event wakes us. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
14682
/*
 * Deferred cleanup for a completed commit; runs from a highpri worker so
 * the committing task (userspace for blocking commits) isn't stalled.
 * Drops the commit's state reference last.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);
	struct drm_i915_private *i915 = to_i915(state->dev);

	drm_atomic_helper_cleanup_planes(&i915->drm, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);

	/* Also drain any states queued for deferred freeing. */
	intel_atomic_helper_free_state(i915);
}
14695
/*
 * The hardware-touching half of an atomic commit.  Runs after the state
 * has been swapped (directly for blocking commits, from a worker for
 * nonblocking ones): waits for dependencies, disables outgoing pipes,
 * reprograms cdclk/SAGV around the update, enables the new configuration,
 * waits for the flips, and finally hands cleanup off to a worker.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* Grab the power domains each updated pipe needs for the commit. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(dev_priv,
						 &state->cdclk.actual,
						 &dev_priv->cdclk.actual,
						 state->cdclk.pipe);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(dev_priv,
						  &state->cdclk.actual,
						  &dev_priv->cdclk.actual,
						  state->cdclk.pipe);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	/* Reload the LUTs for fastsets/color-management-only updates. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state,
							      new_crtc_state);
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(old_crtc_state);

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (state->modeset)
		intel_verify_planes(state);

	if (state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
14849
14850 static void intel_atomic_commit_work(struct work_struct *work)
14851 {
14852         struct intel_atomic_state *state =
14853                 container_of(work, struct intel_atomic_state, base.commit_work);
14854
14855         intel_atomic_commit_tail(state);
14856 }
14857
14858 static int __i915_sw_fence_call
14859 intel_atomic_commit_ready(struct i915_sw_fence *fence,
14860                           enum i915_sw_fence_notify notify)
14861 {
14862         struct intel_atomic_state *state =
14863                 container_of(fence, struct intel_atomic_state, commit_ready);
14864
14865         switch (notify) {
14866         case FENCE_COMPLETE:
14867                 /* we do blocking waits in the worker, nothing to do here */
14868                 break;
14869         case FENCE_FREE:
14870                 {
14871                         struct intel_atomic_helper *helper =
14872                                 &to_i915(state->base.dev)->atomic_helper;
14873
14874                         if (llist_add(&state->freed, &helper->free_list))
14875                                 schedule_work(&helper->free_work);
14876                         break;
14877                 }
14878         }
14879
14880         return NOTIFY_DONE;
14881 }
14882
14883 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
14884 {
14885         struct intel_plane_state *old_plane_state, *new_plane_state;
14886         struct intel_plane *plane;
14887         int i;
14888
14889         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
14890                                              new_plane_state, i)
14891                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
14892                                         to_intel_frontbuffer(new_plane_state->hw.fb),
14893                                         plane->frontbuffer_bit);
14894 }
14895
/*
 * Changing global (non per-CRTC) display state requires every CRTC's
 * modeset lock to be held; assert that for all pipes.
 */
static void assert_global_state_locked(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}
14903
/*
 * i915's drm_mode_config_funcs.atomic_commit implementation.  Prepares
 * the commit, swaps the state in, then either runs the commit tail
 * inline (blocking) or queues it on the appropriate workqueue
 * (nonblocking).  Returns 0 on success or a negative error code; on
 * error all references and the runtime PM wakeref taken here are
 * released before returning.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Held across the commit; released in intel_atomic_commit_tail(). */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);

	if (ret) {
		i915_sw_fence_commit(&state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	/* Past this point the commit can no longer fail. */
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (state->global_state_changed) {
		assert_global_state_locked(dev_priv);

		memcpy(dev_priv->min_cdclk, state->min_cdclk,
		       sizeof(state->min_cdclk));
		memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
		       sizeof(state->min_voltage_level));
		dev_priv->active_pipes = state->active_pipes;
		dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;

		intel_cdclk_swap_state(state);
	}

	/* Reference for the commit tail; dropped by the cleanup worker. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		/* Plain flips go on the lighter-weight flip workqueue. */
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Order blocking commits after any pending modeset work. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
14998
/*
 * Bookkeeping for a one-shot RPS boost armed against a CRTC's vblank:
 * if the tracked request hasn't started by the vblank, it gets boosted
 * (see do_rps_boost()).
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* queued on the CRTC vblank waitqueue */

	struct drm_crtc *crtc;		/* holds a vblank reference */
	struct i915_request *request;	/* holds a request reference */
};
15005
/*
 * Vblank waitqueue callback armed by add_rps_boost_after_vblank().
 * If the request tied to the flip has not started executing by the time
 * the vblank fires, give it an RPS (GPU frequency) boost; then release
 * the request/vblank references and free the wait entry.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	/* Drop the vblank reference taken when the waiter was armed. */
	drm_crtc_vblank_put(wait->crtc);

	/* One-shot entry: unlink and free ourselves before returning. */
	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
15027
15028 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15029                                        struct dma_fence *fence)
15030 {
15031         struct wait_rps_boost *wait;
15032
15033         if (!dma_fence_is_i915(fence))
15034                 return;
15035
15036         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15037                 return;
15038
15039         if (drm_crtc_vblank_get(crtc))
15040                 return;
15041
15042         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15043         if (!wait) {
15044                 drm_crtc_vblank_put(crtc);
15045                 return;
15046         }
15047
15048         wait->request = to_request(dma_fence_get(fence));
15049         wait->crtc = crtc;
15050
15051         wait->wait.func = do_rps_boost;
15052         wait->wait.flags = 0;
15053
15054         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15055 }
15056
15057 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15058 {
15059         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15060         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15061         struct drm_framebuffer *fb = plane_state->hw.fb;
15062         struct i915_vma *vma;
15063
15064         if (plane->id == PLANE_CURSOR &&
15065             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15066                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15067                 const int align = intel_cursor_alignment(dev_priv);
15068                 int err;
15069
15070                 err = i915_gem_object_attach_phys(obj, align);
15071                 if (err)
15072                         return err;
15073         }
15074
15075         vma = intel_pin_and_fence_fb_obj(fb,
15076                                          &plane_state->view,
15077                                          intel_plane_uses_fence(plane_state),
15078                                          &plane_state->flags);
15079         if (IS_ERR(vma))
15080                 return PTR_ERR(vma);
15081
15082         plane_state->vma = vma;
15083
15084         return 0;
15085 }
15086
15087 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15088 {
15089         struct i915_vma *vma;
15090
15091         vma = fetch_and_zero(&old_plane_state->vma);
15092         if (vma)
15093                 intel_unpin_fb_vma(vma, old_plane_state->flags);
15094 }
15095
/*
 * Bump the scheduling priority of any rendering still outstanding against
 * @obj to the display priority, so work targeting the new scanout buffer
 * is not starved behind lower-priority clients.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	/* flags == 0 — presumably no special wait semantics needed here;
	 * see i915_gem_object_wait_priority() for the flag meanings. */
	i915_gem_object_wait_priority(obj, 0, &attr);
}
15104
15105 /**
15106  * intel_prepare_plane_fb - Prepare fb for usage on plane
15107  * @plane: drm plane to prepare for
15108  * @_new_plane_state: the plane state being prepared
15109  *
15110  * Prepares a framebuffer for usage on a display plane.  Generally this
15111  * involves pinning the underlying object and updating the frontbuffer tracking
15112  * bits.  Some older platforms need special physical address handling for
15113  * cursor planes.
15114  *
15115  * Returns 0 on success, negative error code on failure.
15116  */
15117 int
15118 intel_prepare_plane_fb(struct drm_plane *plane,
15119                        struct drm_plane_state *_new_plane_state)
15120 {
15121         struct intel_plane_state *new_plane_state =
15122                 to_intel_plane_state(_new_plane_state);
15123         struct intel_atomic_state *intel_state =
15124                 to_intel_atomic_state(new_plane_state->uapi.state);
15125         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15126         struct drm_framebuffer *fb = new_plane_state->hw.fb;
15127         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15128         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
15129         int ret;
15130
15131         if (old_obj) {
15132                 struct intel_crtc_state *crtc_state =
15133                         intel_atomic_get_new_crtc_state(intel_state,
15134                                                         to_intel_crtc(plane->state->crtc));
15135
15136                 /* Big Hammer, we also need to ensure that any pending
15137                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15138                  * current scanout is retired before unpinning the old
15139                  * framebuffer. Note that we rely on userspace rendering
15140                  * into the buffer attached to the pipe they are waiting
15141                  * on. If not, userspace generates a GPU hang with IPEHR
15142                  * point to the MI_WAIT_FOR_EVENT.
15143                  *
15144                  * This should only fail upon a hung GPU, in which case we
15145                  * can safely continue.
15146                  */
15147                 if (needs_modeset(crtc_state)) {
15148                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15149                                                               old_obj->base.resv, NULL,
15150                                                               false, 0,
15151                                                               GFP_KERNEL);
15152                         if (ret < 0)
15153                                 return ret;
15154                 }
15155         }
15156
15157         if (new_plane_state->uapi.fence) { /* explicit fencing */
15158                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
15159                                                     new_plane_state->uapi.fence,
15160                                                     I915_FENCE_TIMEOUT,
15161                                                     GFP_KERNEL);
15162                 if (ret < 0)
15163                         return ret;
15164         }
15165
15166         if (!obj)
15167                 return 0;
15168
15169         ret = i915_gem_object_pin_pages(obj);
15170         if (ret)
15171                 return ret;
15172
15173         ret = intel_plane_pin_fb(new_plane_state);
15174
15175         i915_gem_object_unpin_pages(obj);
15176         if (ret)
15177                 return ret;
15178
15179         fb_obj_bump_render_priority(obj);
15180         intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
15181
15182         if (!new_plane_state->uapi.fence) { /* implicit fencing */
15183                 struct dma_fence *fence;
15184
15185                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15186                                                       obj->base.resv, NULL,
15187                                                       false, I915_FENCE_TIMEOUT,
15188                                                       GFP_KERNEL);
15189                 if (ret < 0)
15190                         return ret;
15191
15192                 fence = dma_resv_get_excl_rcu(obj->base.resv);
15193                 if (fence) {
15194                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15195                                                    fence);
15196                         dma_fence_put(fence);
15197                 }
15198         } else {
15199                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15200                                            new_plane_state->uapi.fence);
15201         }
15202
15203         /*
15204          * We declare pageflips to be interactive and so merit a small bias
15205          * towards upclocking to deliver the frame on time. By only changing
15206          * the RPS thresholds to sample more regularly and aim for higher
15207          * clocks we can hopefully deliver low power workloads (like kodi)
15208          * that are not quite steady state without resorting to forcing
15209          * maximum clocks following a vblank miss (see do_rps_boost()).
15210          */
15211         if (!intel_state->rps_interactive) {
15212                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15213                 intel_state->rps_interactive = true;
15214         }
15215
15216         return 0;
15217 }
15218
15219 /**
15220  * intel_cleanup_plane_fb - Cleans up an fb after plane use
15221  * @plane: drm plane to clean up for
15222  * @_old_plane_state: the state from the previous modeset
15223  *
15224  * Cleans up a framebuffer that has just been removed from a plane.
15225  */
15226 void
15227 intel_cleanup_plane_fb(struct drm_plane *plane,
15228                        struct drm_plane_state *_old_plane_state)
15229 {
15230         struct intel_plane_state *old_plane_state =
15231                 to_intel_plane_state(_old_plane_state);
15232         struct intel_atomic_state *intel_state =
15233                 to_intel_atomic_state(old_plane_state->uapi.state);
15234         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15235
15236         if (intel_state->rps_interactive) {
15237                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15238                 intel_state->rps_interactive = false;
15239         }
15240
15241         /* Should only be called after a successful intel_prepare_plane_fb()! */
15242         intel_plane_unpin_fb(old_plane_state);
15243 }
15244
15245 /**
15246  * intel_plane_destroy - destroy a plane
15247  * @plane: plane to destroy
15248  *
15249  * Common destruction function for all types of planes (primary, cursor,
15250  * sprite).
15251  */
15252 void intel_plane_destroy(struct drm_plane *plane)
15253 {
15254         drm_plane_cleanup(plane);
15255         kfree(to_intel_plane(plane));
15256 }
15257
15258 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15259                                             u32 format, u64 modifier)
15260 {
15261         switch (modifier) {
15262         case DRM_FORMAT_MOD_LINEAR:
15263         case I915_FORMAT_MOD_X_TILED:
15264                 break;
15265         default:
15266                 return false;
15267         }
15268
15269         switch (format) {
15270         case DRM_FORMAT_C8:
15271         case DRM_FORMAT_RGB565:
15272         case DRM_FORMAT_XRGB1555:
15273         case DRM_FORMAT_XRGB8888:
15274                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15275                         modifier == I915_FORMAT_MOD_X_TILED;
15276         default:
15277                 return false;
15278         }
15279 }
15280
15281 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15282                                             u32 format, u64 modifier)
15283 {
15284         switch (modifier) {
15285         case DRM_FORMAT_MOD_LINEAR:
15286         case I915_FORMAT_MOD_X_TILED:
15287                 break;
15288         default:
15289                 return false;
15290         }
15291
15292         switch (format) {
15293         case DRM_FORMAT_C8:
15294         case DRM_FORMAT_RGB565:
15295         case DRM_FORMAT_XRGB8888:
15296         case DRM_FORMAT_XBGR8888:
15297         case DRM_FORMAT_XRGB2101010:
15298         case DRM_FORMAT_XBGR2101010:
15299         case DRM_FORMAT_XBGR16161616F:
15300                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15301                         modifier == I915_FORMAT_MOD_X_TILED;
15302         default:
15303                 return false;
15304         }
15305 }
15306
15307 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
15308                                               u32 format, u64 modifier)
15309 {
15310         return modifier == DRM_FORMAT_MOD_LINEAR &&
15311                 format == DRM_FORMAT_ARGB8888;
15312 }
15313
/* Plane vtable used for gen4+ (pre-SKL) primary planes. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
15322
/* Plane vtable used for gen2/3 primary planes. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
15331
/*
 * Legacy cursor fastpath: apply a cursor fb/position change directly,
 * bypassing the full atomic commit machinery, as long as only the fb or
 * position changes and no modeset or conflicting commit is in flight.
 * Anything else is punted to drm_atomic_helper_update_plane() ("slow").
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	/* Duplicate current state so the update can be checked and applied. */
	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* On failure the new state is freed; on success the old one is. */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
15454
/* Cursor plane vtable: update_plane takes the legacy fastpath above. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
15463
15464 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
15465                                enum i9xx_plane_id i9xx_plane)
15466 {
15467         if (!HAS_FBC(dev_priv))
15468                 return false;
15469
15470         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15471                 return i9xx_plane == PLANE_A; /* tied to pipe A */
15472         else if (IS_IVYBRIDGE(dev_priv))
15473                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
15474                         i9xx_plane == PLANE_C;
15475         else if (INTEL_GEN(dev_priv) >= 4)
15476                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
15477         else
15478                 return i9xx_plane == PLANE_A;
15479 }
15480
/*
 * Allocate and register the primary plane for @pipe.  Gen9+ is entirely
 * handled by skl_universal_plane_create(); everything below configures
 * the fixed-function primary planes of older platforms (formats, vfuncs,
 * rotation and zpos properties).  Returns the plane or an ERR_PTR.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u64 *modifiers;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		/* Tell FBC this plane's frontbuffer may be compressed. */
		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
		modifiers = i9xx_format_modifiers;

		plane->max_stride = i9xx_plane_max_stride;
		plane->update_plane = i9xx_update_plane;
		plane->disable_plane = i9xx_disable_plane;
		plane->get_hw_state = i9xx_plane_get_hw_state;
		plane->check_plane = i9xx_plane_check;

		/* Pick the platform-specific min cdclk hook. */
		if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
			plane->min_cdclk = hsw_plane_min_cdclk;
		else if (IS_IVYBRIDGE(dev_priv))
			plane->min_cdclk = ivb_plane_min_cdclk;
		else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv))
			plane->min_cdclk = vlv_plane_min_cdclk;
		else
			plane->min_cdclk = i9xx_plane_min_cdclk;

		plane_funcs = &i965_plane_funcs;
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		plane->max_stride = i9xx_plane_max_stride;
		plane->update_plane = i9xx_update_plane;
		plane->disable_plane = i9xx_disable_plane;
		plane->get_hw_state = i9xx_plane_get_hw_state;
		plane->check_plane = i9xx_plane_check;
		plane->min_cdclk = i9xx_plane_min_cdclk;

		plane_funcs = &i8xx_plane_funcs;
	}

	possible_crtcs = BIT(pipe);

	/* Newer platforms name the plane after the pipe, older after the hw plane. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats, modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats, modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary plane sits at the bottom of the zpos stack. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
15620
/*
 * Allocate and register the cursor plane for @pipe, selecting the
 * i845/i865 or the common i9xx cursor implementation as appropriate.
 * Returns the plane or an ERR_PTR.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* ~0 presumably marks the cached register state as "unknown" —
	 * forcing the first update to fully program the hw; confirm against
	 * the i845/i9xx update_cursor implementations. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor stacks above the primary (zpos 0) and all sprite planes. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
15688
15689 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
15690                                     struct intel_crtc_state *crtc_state)
15691 {
15692         struct intel_crtc_scaler_state *scaler_state =
15693                 &crtc_state->scaler_state;
15694         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15695         int i;
15696
15697         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
15698         if (!crtc->num_scalers)
15699                 return;
15700
15701         for (i = 0; i < crtc->num_scalers; i++) {
15702                 struct intel_scaler *scaler = &scaler_state->scalers[i];
15703
15704                 scaler->in_use = 0;
15705                 scaler->mode = 0;
15706         }
15707
15708         scaler_state->scaler_id = -1;
15709 }
15710
/*
 * drm_crtc_funcs members shared by every per-generation crtc vtable
 * below; only the vblank counter/enable/disable hooks vary.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
15721
/*
 * Per-generation crtc vtables: all share the common INTEL_CRTC_FUNCS
 * atomic plumbing and differ only in their vblank hooks.
 */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
15777
/*
 * intel_crtc_init - allocate and register one CRTC for @pipe
 *
 * Allocates the intel_crtc and its initial crtc_state, creates the
 * primary plane, all sprite planes and the cursor plane for the pipe,
 * selects the platform-specific drm_crtc_funcs table, registers the
 * crtc with the DRM core and records it in the pipe/plane -> crtc
 * mapping tables.
 *
 * Returns 0 on success, negative errno on failure. On failure the
 * partially-created planes are left for drm_mode_config_cleanup() to
 * free (see the comment at the fail: label).
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        const struct drm_crtc_funcs *funcs;
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *crtc_state = NULL;
        struct intel_plane *primary = NULL;
        struct intel_plane *cursor = NULL;
        int sprite, ret;

        intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
        if (!intel_crtc)
                return -ENOMEM;

        crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        if (!crtc_state) {
                ret = -ENOMEM;
                goto fail;
        }
        /* Seed the crtc with its initial (all zero) atomic state */
        __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->uapi);
        intel_crtc->config = crtc_state;

        primary = intel_primary_plane_create(dev_priv, pipe);
        if (IS_ERR(primary)) {
                ret = PTR_ERR(primary);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(primary->id);

        for_each_sprite(dev_priv, pipe, sprite) {
                struct intel_plane *plane;

                plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                intel_crtc->plane_ids_mask |= BIT(plane->id);
        }

        cursor = intel_cursor_plane_create(dev_priv, pipe);
        if (IS_ERR(cursor)) {
                ret = PTR_ERR(cursor);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(cursor->id);

        /* Pick the vblank hook table matching the platform */
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv) ||
                    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
                        funcs = &g4x_crtc_funcs;
                else if (IS_GEN(dev_priv, 4))
                        funcs = &i965_crtc_funcs;
                else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
                        funcs = &i915gm_crtc_funcs;
                else if (IS_GEN(dev_priv, 3))
                        funcs = &i915_crtc_funcs;
                else
                        funcs = &i8xx_crtc_funcs;
        } else {
                if (INTEL_GEN(dev_priv) >= 8)
                        funcs = &bdw_crtc_funcs;
                else
                        funcs = &ilk_crtc_funcs;
        }

        ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
                                        &primary->base, &cursor->base,
                                        funcs, "pipe %c", pipe_name(pipe));
        if (ret)
                goto fail;

        intel_crtc->pipe = pipe;

        /* initialize shared scalers */
        intel_crtc_init_scalers(intel_crtc, crtc_state);

        /* Each pipe maps to exactly one crtc; double registration is a bug */
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
               dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
        dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

        /* Pre-gen9 also tracks the primary plane -> crtc mapping */
        if (INTEL_GEN(dev_priv) < 9) {
                enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

                BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
                       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
                dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
        }

        intel_color_init(intel_crtc);

        /* Other code assumes drm crtc index == pipe; sanity check it */
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

        return 0;

fail:
        /*
         * drm_mode_config_cleanup() will free up any
         * crtcs/planes already initialized.
         */
        kfree(crtc_state);
        kfree(intel_crtc);

        return ret;
}
15882
15883 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15884                                       struct drm_file *file)
15885 {
15886         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15887         struct drm_crtc *drmmode_crtc;
15888         struct intel_crtc *crtc;
15889
15890         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15891         if (!drmmode_crtc)
15892                 return -ENOENT;
15893
15894         crtc = to_intel_crtc(drmmode_crtc);
15895         pipe_from_crtc_id->pipe = crtc->pipe;
15896
15897         return 0;
15898 }
15899
15900 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
15901 {
15902         struct drm_device *dev = encoder->base.dev;
15903         struct intel_encoder *source_encoder;
15904         u32 possible_clones = 0;
15905
15906         for_each_intel_encoder(dev, source_encoder) {
15907                 if (encoders_cloneable(encoder, source_encoder))
15908                         possible_clones |= drm_encoder_mask(&source_encoder->base);
15909         }
15910
15911         return possible_clones;
15912 }
15913
15914 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
15915 {
15916         struct drm_device *dev = encoder->base.dev;
15917         struct intel_crtc *crtc;
15918         u32 possible_crtcs = 0;
15919
15920         for_each_intel_crtc(dev, crtc) {
15921                 if (encoder->pipe_mask & BIT(crtc->pipe))
15922                         possible_crtcs |= drm_crtc_mask(&crtc->base);
15923         }
15924
15925         return possible_crtcs;
15926 }
15927
15928 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
15929 {
15930         if (!IS_MOBILE(dev_priv))
15931                 return false;
15932
15933         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
15934                 return false;
15935
15936         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
15937                 return false;
15938
15939         return true;
15940 }
15941
15942 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
15943 {
15944         if (INTEL_GEN(dev_priv) >= 9)
15945                 return false;
15946
15947         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
15948                 return false;
15949
15950         if (HAS_PCH_LPT_H(dev_priv) &&
15951             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
15952                 return false;
15953
15954         /* DDI E can't be used if DDI A requires 4 lanes */
15955         if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
15956                 return false;
15957
15958         if (!dev_priv->vbt.int_crt_support)
15959                 return false;
15960
15961         return true;
15962 }
15963
15964 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
15965 {
15966         int pps_num;
15967         int pps_idx;
15968
15969         if (HAS_DDI(dev_priv))
15970                 return;
15971         /*
15972          * This w/a is needed at least on CPT/PPT, but to be sure apply it
15973          * everywhere where registers can be write protected.
15974          */
15975         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15976                 pps_num = 2;
15977         else
15978                 pps_num = 1;
15979
15980         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15981                 u32 val = I915_READ(PP_CONTROL(pps_idx));
15982
15983                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15984                 I915_WRITE(PP_CONTROL(pps_idx), val);
15985         }
15986 }
15987
/*
 * Select the MMIO base for the panel power sequencer registers
 * (PCH vs VLV/CHV vs legacy) and apply the register-unlock w/a.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
        if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
                dev_priv->pps_mmio_base = PCH_PPS_BASE;
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->pps_mmio_base = VLV_PPS_BASE;
        else
                dev_priv->pps_mmio_base = PPS_BASE;

        intel_pps_unlock_regs_wa(dev_priv);
}
15999
/*
 * intel_setup_outputs - probe and register all display outputs
 *
 * Walks the platform generations from newest to oldest and registers
 * the encoders/connectors that exist on each: DDI ports on gen9+/HSW,
 * PCH-split DP/HDMI/SDVO/LVDS/CRT, VLV/CHV ports, and the legacy
 * gen2-4 outputs. Detection relies on strap registers and/or the VBT,
 * as noted per branch. Registration order matters in places (e.g.
 * LVDS before eDP on PCH-split, see comment below). Finally computes
 * possible_crtcs/possible_clones for every encoder and moves panel
 * connectors to the head of the connector list.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;

        intel_pps_init(dev_priv);

        if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
                return;

        if (INTEL_GEN(dev_priv) >= 12) {
                /* NOTE: PORT_C is skipped here — presumably intentional
                 * for this platform; confirm against the TGL port map. */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
                intel_ddi_init(dev_priv, PORT_G);
                intel_ddi_init(dev_priv, PORT_H);
                intel_ddi_init(dev_priv, PORT_I);
                icl_dsi_init(dev_priv);
        } else if (IS_ELKHARTLAKE(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                icl_dsi_init(dev_priv);
        } else if (IS_GEN(dev_priv, 11)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                /*
                 * On some ICL SKUs port F is not present. No strap bits for
                 * this, so rely on VBT.
                 * Work around broken VBTs on SKUs known to have no port F.
                 */
                if (IS_ICL_WITH_PORT_F(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_F))
                        intel_ddi_init(dev_priv, PORT_F);

                icl_dsi_init(dev_priv);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
                 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
                 * detect the ports.
                 */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);

                vlv_dsi_init(dev_priv);
        } else if (HAS_DDI(dev_priv)) {
                int found;

                if (intel_ddi_crt_present(dev_priv))
                        intel_crt_init(dev_priv);

                /*
                 * Haswell uses DDI functions to detect digital outputs.
                 * On SKL pre-D0 the strap isn't connected, so we assume
                 * it's there.
                 */
                found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
                /* WaIgnoreDDIAStrap: skl */
                if (found || IS_GEN9_BC(dev_priv))
                        intel_ddi_init(dev_priv, PORT_A);

                /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
                 * register */
                found = I915_READ(SFUSE_STRAP);

                if (found & SFUSE_STRAP_DDIB_DETECTED)
                        intel_ddi_init(dev_priv, PORT_B);
                if (found & SFUSE_STRAP_DDIC_DETECTED)
                        intel_ddi_init(dev_priv, PORT_C);
                if (found & SFUSE_STRAP_DDID_DETECTED)
                        intel_ddi_init(dev_priv, PORT_D);
                if (found & SFUSE_STRAP_DDIF_DETECTED)
                        intel_ddi_init(dev_priv, PORT_F);
                /*
                 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
                 */
                if (IS_GEN9_BC(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_E))
                        intel_ddi_init(dev_priv, PORT_E);

        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;

                /*
                 * intel_edp_init_connector() depends on this completing first,
                 * to prevent the registration of both eDP and LVDS and the
                 * incorrect sharing of the PPS.
                 */
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);

                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

                if (ilk_has_edp_a(dev_priv))
                        intel_dp_init(dev_priv, DP_A, PORT_A);

                if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
                        if (!found)
                                intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
                }

                if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

                /* Port D is skipped for HDMI when it is wired as eDP */
                if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                bool has_edp, has_port;

                if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
                        intel_crt_init(dev_priv);

                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
                 * eDP ports may have been muxed to an alternate function.
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
                 *
                 * Sadly the straps seem to be missing sometimes even for HDMI
                 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
                 * and VBT for the presence of the port. Additionally we can't
                 * trust the port type the VBT declares as we've seen at least
                 * HDMI ports that the VBT claim are DP or eDP.
                 */
                has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
                has_port = intel_bios_is_port_present(dev_priv, PORT_B);
                if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
                if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

                has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
                has_port = intel_bios_is_port_present(dev_priv, PORT_C);
                if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
                if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

                if (IS_CHERRYVIEW(dev_priv)) {
                        /*
                         * eDP not supported on port D,
                         * so no need to worry about it
                         */
                        has_port = intel_bios_is_port_present(dev_priv, PORT_D);
                        if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
                                intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
                        if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
                                intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
                }

                vlv_dsi_init(dev_priv);
        } else if (IS_PINEVIEW(dev_priv)) {
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);
        } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                bool found = false;

                if (IS_MOBILE(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
                        if (!found && IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
                        }

                        if (!found && IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_B, PORT_B);
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
                }

                if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

                        if (IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
                        }
                        if (IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_C, PORT_C);
                }

                if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev_priv, DP_D, PORT_D);

                if (SUPPORTS_TV(dev_priv))
                        intel_tv_init(dev_priv);
        } else if (IS_GEN(dev_priv, 2)) {
                if (IS_I85X(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);
                intel_dvo_init(dev_priv);
        }

        intel_psr_init(dev_priv);

        /* Now that all encoders exist, compute their crtc/clone masks */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                encoder->base.possible_crtcs =
                        intel_encoder_possible_crtcs(encoder);
                encoder->base.possible_clones =
                        intel_encoder_possible_clones(encoder);
        }

        intel_init_pch_refclk(dev_priv);

        drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
16238
/*
 * .destroy hook: unregister the fb from the DRM core, drop the
 * frontbuffer reference taken in intel_framebuffer_init(), and free
 * the wrapper object.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

        drm_framebuffer_cleanup(fb);
        intel_frontbuffer_put(intel_fb->frontbuffer);

        kfree(intel_fb);
}
16248
/*
 * .create_handle hook: hand userspace a GEM handle for the fb's
 * backing object. Refused for userptr objects (objects backed by
 * user memory must not be re-exported as GEM handles).
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
                                                struct drm_file *file,
                                                unsigned int *handle)
{
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);

        if (obj->userptr.mm) {
                DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
                return -EINVAL;
        }

        return drm_gem_handle_create(file, &obj->base, handle);
}
16262
/*
 * .dirty hook: userspace signalled frontbuffer rendering. Flush the
 * object for display and notify the frontbuffer tracking machinery
 * (ORIGIN_DIRTYFB). The clip rects are accepted but not used here.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                        struct drm_file *file,
                                        unsigned flags, unsigned color,
                                        struct drm_clip_rect *clips,
                                        unsigned num_clips)
{
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);

        i915_gem_object_flush_if_display(obj);
        intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

        return 0;
}
16276
/* Framebuffer vfuncs registered via drm_framebuffer_init() below */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .destroy = intel_user_framebuffer_destroy,
        .create_handle = intel_user_framebuffer_create_handle,
        .dirty = intel_user_framebuffer_dirty,
};
16282
/*
 * intel_framebuffer_init - validate and register a framebuffer
 * @intel_fb: fb wrapper to initialize
 * @obj: GEM object backing the fb
 * @mode_cmd: userspace addfb request (may be modified: a legacy addfb
 *            without DRM_MODE_FB_MODIFIERS gets its modifier[0] filled
 *            in from the object's tiling mode)
 *
 * Checks modifier/tiling consistency, format+modifier support, stride
 * limits and per-plane stride alignment, then fills the drm_framebuffer
 * and registers it with the DRM core.
 *
 * Takes a frontbuffer reference on @obj; released on the error path
 * here and otherwise in intel_user_framebuffer_destroy().
 *
 * Returns 0 on success, negative errno on failure.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_framebuffer *fb = &intel_fb->base;
        u32 max_stride;
        unsigned int tiling, stride;
        int ret = -EINVAL;
        int i;

        intel_fb->frontbuffer = intel_frontbuffer_get(obj);
        if (!intel_fb->frontbuffer)
                return -ENOMEM;

        /* Snapshot tiling/stride under the object lock */
        i915_gem_object_lock(obj);
        tiling = i915_gem_object_get_tiling(obj);
        stride = i915_gem_object_get_stride(obj);
        i915_gem_object_unlock(obj);

        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
                /*
                 * If there's a fence, enforce that
                 * the fb modifier and tiling mode match.
                 */
                if (tiling != I915_TILING_NONE &&
                    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
                        goto err;
                }
        } else {
                /* Legacy addfb: derive the modifier from the tiling mode */
                if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
                } else if (tiling == I915_TILING_Y) {
                        DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
                        goto err;
                }
        }

        /* Reject format/modifier combos no plane can scan out */
        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                struct drm_format_name_buf format_name;

                DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
                              drm_get_format_name(mode_cmd->pixel_format,
                                                  &format_name),
                              mode_cmd->modifier[0]);
                goto err;
        }

        /*
         * gen2/3 display engine uses the fence if present,
         * so the tiling mode must match the fb modifier exactly.
         */
        if (INTEL_GEN(dev_priv) < 4 &&
            tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
                goto err;
        }

        max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
                                         mode_cmd->modifier[0]);
        if (mode_cmd->pitches[0] > max_stride) {
                DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
                              mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
                              "tiled" : "linear",
                              mode_cmd->pitches[0], max_stride);
                goto err;
        }

        /*
         * If there's a fence, enforce that
         * the fb pitch and fence stride match.
         */
        if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
                DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
                              mode_cmd->pitches[0], stride);
                goto err;
        }

        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0)
                goto err;

        drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

        for (i = 0; i < fb->format->num_planes; i++) {
                u32 stride_alignment;

                /* All color planes must share the same backing object */
                if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
                        DRM_DEBUG_KMS("bad plane %d handle\n", i);
                        goto err;
                }

                stride_alignment = intel_fb_stride_alignment(fb, i);

                /*
                 * Display WA #0531: skl,bxt,kbl,glk
                 *
                 * Render decompression and plane width > 3840
                 * combined with horizontal panning requires the
                 * plane stride to be a multiple of 4. We'll just
                 * require the entire fb to accommodate that to avoid
                 * potential runtime errors at plane configuration time.
                 */
                if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
                    is_ccs_modifier(fb->modifier))
                        stride_alignment *= 4;

                if (fb->pitches[i] & (stride_alignment - 1)) {
                        DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
                                      i, fb->pitches[i], stride_alignment);
                        goto err;
                }

                fb->obj[i] = &obj->base;
        }

        ret = intel_fill_fb_info(dev_priv, fb);
        if (ret)
                goto err;

        ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                goto err;
        }

        return 0;

err:
        intel_frontbuffer_put(intel_fb->frontbuffer);
        return ret;
}
16418
16419 static struct drm_framebuffer *
16420 intel_user_framebuffer_create(struct drm_device *dev,
16421                               struct drm_file *filp,
16422                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
16423 {
16424         struct drm_framebuffer *fb;
16425         struct drm_i915_gem_object *obj;
16426         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
16427
16428         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
16429         if (!obj)
16430                 return ERR_PTR(-ENOENT);
16431
16432         fb = intel_framebuffer_create(obj, &mode_cmd);
16433         i915_gem_object_put(obj);
16434
16435         return fb;
16436 }
16437
/*
 * .atomic_state_free hook: release the default atomic state contents,
 * tear down the commit_ready sw fence embedded in the i915 state, and
 * free the allocation.
 */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

        drm_atomic_state_default_release(state);

        i915_sw_fence_fini(&intel_state->commit_ready);

        kfree(state);
}
16448
/*
 * Global .mode_valid hook: reject display modes that exceed the
 * transcoder timing limits of this platform, or that use mode flags
 * the hardware does not support.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	/* No composite sync support. */
	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	/* Broadcast/pixmux/clkdiv2 modes are not supported either. */
	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits, per platform generation. */
	if (INTEL_GEN(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	/* All horizontal timing fields must fit within htotal_max. */
	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active/blanking sizes differ between gen5+ and older hw. */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}
16539
16540 enum drm_mode_status
16541 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16542                                 const struct drm_display_mode *mode)
16543 {
16544         int plane_width_max, plane_height_max;
16545
16546         /*
16547          * intel_mode_valid() should be
16548          * sufficient on older platforms.
16549          */
16550         if (INTEL_GEN(dev_priv) < 9)
16551                 return MODE_OK;
16552
16553         /*
16554          * Most people will probably want a fullscreen
16555          * plane so let's not advertize modes that are
16556          * too big for that.
16557          */
16558         if (INTEL_GEN(dev_priv) >= 11) {
16559                 plane_width_max = 5120;
16560                 plane_height_max = 4320;
16561         } else {
16562                 plane_width_max = 5120;
16563                 plane_height_max = 4096;
16564         }
16565
16566         if (mode->hdisplay > plane_width_max)
16567                 return MODE_H_ILLEGAL;
16568
16569         if (mode->vdisplay > plane_height_max)
16570                 return MODE_V_ILLEGAL;
16571
16572         return MODE_OK;
16573 }
16574
/* Top-level KMS mode-config hooks for the i915 driver. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
16586
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform pipe-config readout, clock computation, and
 * crtc enable/disable implementations. Branch order matters: the most
 * specific/newest platforms are matched first.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	/*
	 * NOTE: gen9+ is checked before HAS_DDI so that gen9+ gets the
	 * skylake initial plane config readout while older DDI platforms
	 * (HSW/BDW) fall through to the i9xx variant.
	 */
	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* Gen2 fallback: only the clock computation differs. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training is only present on ILK..BDW. */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;

}
16680
/*
 * Read out the current CDCLK hardware state and seed the software
 * (logical/actual) copies from it.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	intel_update_cdclk(i915);
	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
16687
16688 /*
16689  * Calculate what we think the watermarks should be for the state we've read
16690  * out of the hardware and then immediately program those watermarks so that
16691  * we ensure the hardware settings match our internal state.
16692  *
16693  * We can calculate what we think WM's should be by creating a duplicate of the
16694  * current state (which was constructed during hardware readout) and running it
16695  * through the atomic check code to calculate new watermark values in the
16696  * state object.
16697  */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Deadlock avoidance: drop all locks and try again. */
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	/* Duplicate the state that hardware readout constructed. */
	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc_state);

		/* Keep the committed crtc state's wm copy in sync too. */
		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
16772
16773 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16774 {
16775         if (IS_GEN(dev_priv, 5)) {
16776                 u32 fdi_pll_clk =
16777                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16778
16779                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16780         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16781                 dev_priv->fdi_pll_freq = 270000;
16782         } else {
16783                 return;
16784         }
16785
16786         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16787 }
16788
/*
 * Commit the state read out from hardware back to itself, forcing all
 * active planes to recompute their states before the first userspace
 * modeset. Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On lock contention, clear the state and restart the whole loop. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
16845
16846 static void intel_mode_config_init(struct drm_i915_private *i915)
16847 {
16848         struct drm_mode_config *mode_config = &i915->drm.mode_config;
16849
16850         drm_mode_config_init(&i915->drm);
16851
16852         mode_config->min_width = 0;
16853         mode_config->min_height = 0;
16854
16855         mode_config->preferred_depth = 24;
16856         mode_config->prefer_shadow = 1;
16857
16858         mode_config->allow_fb_modifiers = true;
16859
16860         mode_config->funcs = &intel_mode_funcs;
16861
16862         /*
16863          * Maximum framebuffer dimensions, chosen to match
16864          * the maximum render engine surface size on gen4+.
16865          */
16866         if (INTEL_GEN(i915) >= 7) {
16867                 mode_config->max_width = 16384;
16868                 mode_config->max_height = 16384;
16869         } else if (INTEL_GEN(i915) >= 4) {
16870                 mode_config->max_width = 8192;
16871                 mode_config->max_height = 8192;
16872         } else if (IS_GEN(i915, 3)) {
16873                 mode_config->max_width = 4096;
16874                 mode_config->max_height = 4096;
16875         } else {
16876                 mode_config->max_width = 2048;
16877                 mode_config->max_height = 2048;
16878         }
16879
16880         if (IS_I845G(i915) || IS_I865G(i915)) {
16881                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
16882                 mode_config->cursor_height = 1023;
16883         } else if (IS_GEN(i915, 2)) {
16884                 mode_config->cursor_width = 64;
16885                 mode_config->cursor_height = 64;
16886         } else {
16887                 mode_config->cursor_width = 256;
16888                 mode_config->cursor_height = 256;
16889         }
16890 }
16891
/*
 * Main display/modeset initialization: sets up workqueues, mode config,
 * CRTCs, PLLs, outputs, reads out the BIOS-programmed hardware state and
 * sanitizes it. Returns 0 on success or a negative error code.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	/*
	 * NOTE(review): the workqueue allocations below are not checked for
	 * NULL, and are not destroyed on the error paths further down —
	 * presumably the caller's unwind handles teardown; verify.
	 */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_bw_init(i915);
	if (ret)
		return ret;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_NUM_PIPES(i915),
		      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Only create CRTCs when the display is actually usable. */
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				drm_mode_config_cleanup(dev);
				return ret;
			}
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out the BIOS-programmed hardware state and sanitize it. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		/* Non-fatal: log and continue with init. */
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	return 0;
}
16998
/*
 * Force-enable a pipe on i830 (quirk): program a fixed 640x480@60Hz
 * mode and its DPLL directly, then wait for the pipe to start scanning.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: the fixed dividers must yield ~25.154 MHz dotclock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Program the fixed 640x480 pipe timings. */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
17068
/*
 * Counterpart of i830_enable_pipe(): disable the pipe, wait for the
 * scanline to stop, then shut down its DPLL. All planes/cursors must
 * already be off (asserted via the WARN_ONs below).
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* The pipe must stop scanning before the DPLL may be disabled. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
17090
17091 static void
17092 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17093 {
17094         struct intel_crtc *crtc;
17095
17096         if (INTEL_GEN(dev_priv) >= 4)
17097                 return;
17098
17099         for_each_intel_crtc(&dev_priv->drm, crtc) {
17100                 struct intel_plane *plane =
17101                         to_intel_plane(crtc->base.primary);
17102                 struct intel_crtc *plane_crtc;
17103                 enum pipe pipe;
17104
17105                 if (!plane->get_hw_state(plane, &pipe))
17106                         continue;
17107
17108                 if (pipe == crtc->pipe)
17109                         continue;
17110
17111                 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17112                               plane->base.base.id, plane->base.name);
17113
17114                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17115                 intel_plane_disable_noatomic(plane_crtc, plane);
17116         }
17117 }
17118
/* Whether any encoder is currently attached to this crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Returning from the first loop iteration: any match suffices. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
17129
/*
 * Return the first connector attached to this encoder, or NULL if
 * none is attached.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	/* Returning from the first loop iteration: first match wins. */
	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
17140
/*
 * Whether the given PCH transcoder exists on this platform: IBX/CPT
 * always do, LPT-H only has transcoder A.
 * (NOTE: the "trancoder" typo in the name is kept to avoid churning
 * callers.)
 */
static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
			      enum pipe pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}
17147
/*
 * Bring a crtc left behind by the BIOS into a state consistent with
 * our bookkeeping: clear debug delays, disable stray planes, and
 * disable the crtc entirely if it has no encoders.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
17220
17221 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17222 {
17223         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17224
17225         /*
17226          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17227          * the hardware when a high res displays plugged in. DPLL P
17228          * divider is zero, and the pipe timings are bonkers. We'll
17229          * try to disable everything in that case.
17230          *
17231          * FIXME would be nice to be able to sanitize this state
17232          * without several WARNs, but for now let's take the easy
17233          * road.
17234          */
17235         return IS_GEN(dev_priv, 6) &&
17236                 crtc_state->hw.active &&
17237                 crtc_state->shared_dpll &&
17238                 crtc_state->port_clock == 0;
17239 }
17240
/*
 * Clamp an encoder (and its connector) to a consistent state: an encoder
 * that claims an active connector but has no active pipe — typically
 * fallout from resume-time register restoring or BIOS misprogramming —
 * is manually disabled and its crtc/connector links are cleared.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/*
	 * SNB BIOS DPLL misprogramming (see has_bogus_dpll_config()):
	 * treat the pipe as inactive so the disable path below runs.
	 */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* disable then post_disable, mirroring a normal modeset */
			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
17307
17308 /* FIXME read out full plane state for all planes */
17309 static void readout_plane_state(struct drm_i915_private *dev_priv)
17310 {
17311         struct intel_plane *plane;
17312         struct intel_crtc *crtc;
17313
17314         for_each_intel_plane(&dev_priv->drm, plane) {
17315                 struct intel_plane_state *plane_state =
17316                         to_intel_plane_state(plane->base.state);
17317                 struct intel_crtc_state *crtc_state;
17318                 enum pipe pipe = PIPE_A;
17319                 bool visible;
17320
17321                 visible = plane->get_hw_state(plane, &pipe);
17322
17323                 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17324                 crtc_state = to_intel_crtc_state(crtc->base.state);
17325
17326                 intel_set_plane_visible(crtc_state, plane_state, visible);
17327
17328                 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
17329                               plane->base.base.id, plane->base.name,
17330                               enableddisabled(visible), pipe_name(pipe));
17331         }
17332
17333         for_each_intel_crtc(&dev_priv->drm, crtc) {
17334                 struct intel_crtc_state *crtc_state =
17335                         to_intel_crtc_state(crtc->base.state);
17336
17337                 fixup_active_planes(crtc_state);
17338         }
17339 }
17340
/*
 * Read the current hardware modeset state into the atomic state
 * structures — crtcs, planes, shared DPLLs, encoders and connectors —
 * then derive the software-only bits (modes, pixel rate, min cdclk,
 * voltage level, bandwidth) from what was read out.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_pipes = 0;

	/* Reset each crtc state and read the pipe config back from hw. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Throw away any stale software state before re-reading. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		memset(crtc_state, 0, sizeof(*crtc_state));
		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->uapi);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			dev_priv->active_pipes |= BIT(crtc->pipe);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->hw.active));
	}

	readout_plane_state(dev_priv);

	/* Read back shared DPLL state and rebuild the pll<->crtc mapping. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);

		/* EHL DPLL4 holds an extra power reference while enabled. */
		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->hw.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Link each active encoder to the crtc its hardware is driving. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Link connectors to encoders and fill in the crtc's uapi masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive software state (modes, rates, cdclk, bw) from the readout. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
				      plane->base.base.id, plane->base.name,
				      crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (WARN_ON(min_cdclk < 0))
				min_cdclk = 0;
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
17541
17542 static void
17543 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17544 {
17545         struct intel_encoder *encoder;
17546
17547         for_each_intel_encoder(&dev_priv->drm, encoder) {
17548                 struct intel_crtc_state *crtc_state;
17549
17550                 if (!encoder->get_power_domains)
17551                         continue;
17552
17553                 /*
17554                  * MST-primary and inactive encoders don't have a crtc state
17555                  * and neither of these require any power domain references.
17556                  */
17557                 if (!encoder->base.crtc)
17558                         continue;
17559
17560                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17561                 encoder->get_power_domains(encoder, crtc_state);
17562         }
17563 }
17564
/*
 * Apply display workarounds that must be in place before any other
 * display hardware programming happens.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
17581
17582 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17583                                        enum port port, i915_reg_t hdmi_reg)
17584 {
17585         u32 val = I915_READ(hdmi_reg);
17586
17587         if (val & SDVO_ENABLE ||
17588             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17589                 return;
17590
17591         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
17592                       port_name(port));
17593
17594         val &= ~SDVO_PIPE_SEL_MASK;
17595         val |= SDVO_PIPE_SEL(PIPE_A);
17596
17597         I915_WRITE(hdmi_reg, val);
17598 }
17599
17600 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17601                                      enum port port, i915_reg_t dp_reg)
17602 {
17603         u32 val = I915_READ(dp_reg);
17604
17605         if (val & DP_PORT_EN ||
17606             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17607                 return;
17608
17609         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
17610                       port_name(port));
17611
17612         val &= ~DP_PIPE_SEL_MASK;
17613         val |= DP_PIPE_SEL(PIPE_A);
17614
17615         I915_WRITE(dp_reg, val);
17616 }
17617
17618 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
17619 {
17620         /*
17621          * The BIOS may select transcoder B on some of the PCH
17622          * ports even it doesn't enable the port. This would trip
17623          * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
17624          * Sanitize the transcoder select bits to prevent that. We
17625          * assume that the BIOS never actually enabled the port,
17626          * because if it did we'd actually have to toggle the port
17627          * on and back off to make the transcoder A select stick
17628          * (see. intel_dp_link_down(), intel_disable_hdmi(),
17629          * intel_disable_sdvo()).
17630          */
17631         ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
17632         ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
17633         ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
17634
17635         /* PCH SDVOB multiplex with HDMIB */
17636         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
17637         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
17638         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
17639 }
17640
/*
 * Scan out the current hw modeset state and sanitize it to a consistent
 * software state: take over whatever the BIOS/previous owner programmed,
 * shut down anything inconsistent, and read back watermarks and power
 * domain usage. The sequencing below is deliberate — do not reorder.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	/* Keep everything powered while we poke at registers. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL the BIOS left running with no user. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back (and on some platforms sanitize) watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		/* A sanitized state should not need extra power domains. */
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}
17741
17742 void intel_display_resume(struct drm_device *dev)
17743 {
17744         struct drm_i915_private *dev_priv = to_i915(dev);
17745         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
17746         struct drm_modeset_acquire_ctx ctx;
17747         int ret;
17748
17749         dev_priv->modeset_restore_state = NULL;
17750         if (state)
17751                 state->acquire_ctx = &ctx;
17752
17753         drm_modeset_acquire_init(&ctx, 0);
17754
17755         while (1) {
17756                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
17757                 if (ret != -EDEADLK)
17758                         break;
17759
17760                 drm_modeset_backoff(&ctx);
17761         }
17762
17763         if (!ret)
17764                 ret = __intel_display_resume(dev, state, &ctx);
17765
17766         intel_enable_ipc(dev_priv);
17767         drm_modeset_drop_locks(&ctx);
17768         drm_modeset_acquire_fini(&ctx);
17769
17770         if (ret)
17771                 DRM_ERROR("Restoring old state failed with %i\n", ret);
17772         if (state)
17773                 drm_atomic_state_put(state);
17774 }
17775
17776 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
17777 {
17778         struct intel_connector *connector;
17779         struct drm_connector_list_iter conn_iter;
17780
17781         /* Kill all the work that may have been queued by hpd. */
17782         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
17783         for_each_intel_connector_iter(connector, &conn_iter) {
17784                 if (connector->modeset_retry_work.func)
17785                         cancel_work_sync(&connector->modeset_retry_work);
17786                 if (connector->hdcp.shim) {
17787                         cancel_delayed_work_sync(&connector->hdcp.check_work);
17788                         cancel_work_sync(&connector->hdcp.prop_work);
17789                 }
17790         }
17791         drm_connector_list_iter_end(&conn_iter);
17792 }
17793
/*
 * Tear down the display side of the driver. The ordering below is
 * deliberate (interrupts before polling, polling before fbdev, ...);
 * see the inline comments before reordering anything.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Let in-flight flips and modesets finish before tearing down. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Workqueues were flushed above; now it is safe to destroy them. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
17838
17839 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
17840
/*
 * Snapshot of display register state captured at GPU error time by
 * intel_display_capture_error_state() and printed later via
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_CTL2 (HSW/BDW only) */

	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		u32 size;	/* NOTE(review): never filled in by the capture code — confirm before relying on it */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* pipe power domain was enabled at capture */
		u32 source;	/* PIPESRC */
		u32 stat;	/* PIPESTAT (GMCH platforms only) */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;	/* DSPCNTR */
		u32 stride;	/* DSPSTRIDE */
		u32 size;	/* DSPSIZE (gen <= 3) */
		u32 pos;	/* DSPPOS (gen <= 3) */
		u32 addr;	/* DSPADDR (gen <= 7, not HSW) */
		u32 surface;	/* DSPSURF (gen >= 4) */
		u32 tile_offset;	/* DSPTILEOFF (gen >= 4) */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;		/* transcoder exists on this platform */
		bool power_domain_on;	/* transcoder power domain was enabled */
		enum transcoder cpu_transcoder;

		u32 conf;	/* PIPECONF */

		/* transcoder timing registers */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];	/* A, B, C, D, EDP — must match transcoders[] in the capture code */
};
17883
17884 struct intel_display_error_state *
17885 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
17886 {
17887         struct intel_display_error_state *error;
17888         int transcoders[] = {
17889                 TRANSCODER_A,
17890                 TRANSCODER_B,
17891                 TRANSCODER_C,
17892                 TRANSCODER_D,
17893                 TRANSCODER_EDP,
17894         };
17895         int i;
17896
17897         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
17898
17899         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
17900                 return NULL;
17901
17902         error = kzalloc(sizeof(*error), GFP_ATOMIC);
17903         if (error == NULL)
17904                 return NULL;
17905
17906         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17907                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
17908
17909         for_each_pipe(dev_priv, i) {
17910                 error->pipe[i].power_domain_on =
17911                         __intel_display_power_is_enabled(dev_priv,
17912                                                          POWER_DOMAIN_PIPE(i));
17913                 if (!error->pipe[i].power_domain_on)
17914                         continue;
17915
17916                 error->cursor[i].control = I915_READ(CURCNTR(i));
17917                 error->cursor[i].position = I915_READ(CURPOS(i));
17918                 error->cursor[i].base = I915_READ(CURBASE(i));
17919
17920                 error->plane[i].control = I915_READ(DSPCNTR(i));
17921                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
17922                 if (INTEL_GEN(dev_priv) <= 3) {
17923                         error->plane[i].size = I915_READ(DSPSIZE(i));
17924                         error->plane[i].pos = I915_READ(DSPPOS(i));
17925                 }
17926                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
17927                         error->plane[i].addr = I915_READ(DSPADDR(i));
17928                 if (INTEL_GEN(dev_priv) >= 4) {
17929                         error->plane[i].surface = I915_READ(DSPSURF(i));
17930                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
17931                 }
17932
17933                 error->pipe[i].source = I915_READ(PIPESRC(i));
17934
17935                 if (HAS_GMCH(dev_priv))
17936                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
17937         }
17938
17939         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17940                 enum transcoder cpu_transcoder = transcoders[i];
17941
17942                 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
17943                         continue;
17944
17945                 error->transcoder[i].available = true;
17946                 error->transcoder[i].power_domain_on =
17947                         __intel_display_power_is_enabled(dev_priv,
17948                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
17949                 if (!error->transcoder[i].power_domain_on)
17950                         continue;
17951
17952                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
17953
17954                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
17955                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
17956                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
17957                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
17958                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
17959                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
17960                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
17961         }
17962
17963         return error;
17964 }
17965
/* Shorthand for appending formatted text to the captured error-state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
17967
/*
 * intel_display_print_error_state - dump captured display state to an error buf
 * @m: GPU error-state string buffer to append to
 * @error: display state previously captured at hang time, may be NULL
 *
 * Pretty-prints the raw register snapshot taken by the capture function:
 * per-pipe source/status, per-pipe primary plane and cursor registers, and
 * per-transcoder timing registers.  The platform/gen checks below mirror the
 * ones used at capture time, so only fields that were actually read from
 * hardware are printed.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	/* Capture may have returned NULL (no display, or OOM at capture). */
	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	/* PWR_WELL_CTL2 is only captured on HSW/BDW; gate the print the same way. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		/*
		 * NOTE(review): STAT is only captured on GMCH platforms but is
		 * printed unconditionally, so it reads as 0 elsewhere.
		 */
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* SIZE/POS exist only on gen2/3. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		/* DSPADDR was captured for gen <= 7, except HSW. */
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		/* SURF/TILEOFF were captured on gen4+. */
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		/* Skip transcoders the platform doesn't have (no MMIO offset). */
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
18026
18027 #endif