]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/i915/display/intel_display.c
drm/i915: Avoid calling i915_gem_object_unbind holding object lock
[linux.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "gt/intel_rps.h"
59
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
78 #include "intel_pm.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
83 #include "intel_tc.h"
84 #include "intel_vga.h"
85
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv (adds alpha-capable formats) */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Tiling modifiers supported on gen <= 9 pre-skl primary planes */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only support linear buffers */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
145
/* Forward declarations for static helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
/*
 * Valid ranges for the DPLL divider values on a given platform/output
 * combination.  A candidate set of dividers is accepted only if every
 * field falls inside its [min, max] range (see intel_PLL_is_valid()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;	/* divider and clock ranges */

	struct {
		int dot_limit;		/* dot clock threshold selecting slow vs fast p2 */
		int p2_slow, p2_fast;	/* p2 below / at-or-above dot_limit */
	} p2;
};
185
186 /* returns HPLL frequency in kHz */
187 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
188 {
189         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
190
191         /* Obtain SKU information */
192         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
193                 CCK_FUSE_HPLL_FREQ_MASK;
194
195         return vco_freq[hpll_freq] * 1000;
196 }
197
/*
 * Read a CCK clock control register and return the resulting clock rate
 * derived from @ref_freq (in kHz).  @name is only used for the warning
 * message below.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	/*
	 * The status field should mirror the requested divider once the
	 * hardware has settled; warn if a divider change is still pending.
	 */
	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	/* rate = ref * 2 / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
213
214 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
215                            const char *name, u32 reg)
216 {
217         int hpll;
218
219         vlv_cck_get(dev_priv);
220
221         if (dev_priv->hpll_freq == 0)
222                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
223
224         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
225
226         vlv_cck_put(dev_priv);
227
228         return hpll;
229 }
230
231 static void intel_update_czclk(struct drm_i915_private *dev_priv)
232 {
233         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
234                 return;
235
236         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
237                                                       CCK_CZ_CLOCK_CONTROL);
238
239         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
240 }
241
242 static inline u32 /* units of 100MHz */
243 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
244                     const struct intel_crtc_state *pipe_config)
245 {
246         if (HAS_DDI(dev_priv))
247                 return pipe_config->port_clock; /* SPLL */
248         else
249                 return dev_priv->fdi_pll_freq;
250 }
251
/* DPLL limits: gen2 (i8xx) driving a DAC (VGA) output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* DPLL limits: gen2 (i8xx) driving a DVO output */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* DPLL limits: gen2 (i8xx) driving LVDS */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* DPLL limits: gen3/4 (i9xx) driving SDVO/HDMI/DP */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* DPLL limits: gen3/4 (i9xx) driving LVDS */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
316
317
/* DPLL limits: g4x driving SDVO */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* DPLL limits: g4x driving HDMI/DP */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* DPLL limits: g4x driving single-channel LVDS */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* DPLL limits: g4x driving dual-channel LVDS */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* DPLL limits: Pineview driving SDVO */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* DPLL limits: Pineview driving LVDS */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
401
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* DPLL limits: ILK/SNB single-channel LVDS, 120MHz refclk */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* DPLL limits: ILK/SNB dual-channel LVDS, 120MHz refclk */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* DPLL limits: ILK/SNB dual-channel LVDS, 100MHz refclk */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
472
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored with 22 fractional bits on CHV */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
516
517 /* WA Display #0827: Gen9:all */
518 static void
519 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
520 {
521         if (enable)
522                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
523                            I915_READ(CLKGATE_DIS_PSL(pipe)) |
524                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
525         else
526                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
527                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
528                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
529 }
530
531 /* Wa_2006604312:icl */
532 static void
533 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
534                        bool enable)
535 {
536         if (enable)
537                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
538                            I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
539         else
540                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
541                            I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
542 }
543
544 static bool
545 needs_modeset(const struct intel_crtc_state *state)
546 {
547         return drm_atomic_crtc_needs_modeset(&state->uapi);
548 }
549
550 bool
551 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
552 {
553         return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
554                 crtc_state->sync_mode_slaves_mask);
555 }
556
557 static bool
558 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
559 {
560         return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
561                 crtc_state->sync_mode_slaves_mask);
562 }
563
564 static bool
565 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
566 {
567         return crtc_state->master_transcoder != INVALID_TRANSCODER;
568 }
569
570 /*
571  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
572  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
573  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
574  * The helpers' return value is the rate of the clock that is fed to the
575  * display engine's pipe which can be the above fast dot clock rate or a
576  * divided-down version of it.
577  */
578 /* m1 is reserved as 0 in Pineview, n is a ring counter */
579 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
580 {
581         clock->m = clock->m2 + 2;
582         clock->p = clock->p1 * clock->p2;
583         if (WARN_ON(clock->n == 0 || clock->p == 0))
584                 return 0;
585         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
586         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
587
588         return clock->dot;
589 }
590
591 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
592 {
593         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
594 }
595
596 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
597 {
598         clock->m = i9xx_dpll_compute_m(clock);
599         clock->p = clock->p1 * clock->p2;
600         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
601                 return 0;
602         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
603         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
604
605         return clock->dot;
606 }
607
608 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
609 {
610         clock->m = clock->m1 * clock->m2;
611         clock->p = clock->p1 * clock->p2;
612         if (WARN_ON(clock->n == 0 || clock->p == 0))
613                 return 0;
614         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
615         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
616
617         return clock->dot / 5;
618 }
619
/*
 * CHV DPLL: m2 carries 22 fractional bits (see intel_limits_chv), so the
 * VCO math is done in 64 bits and the divisor is n << 22.  Returns the
 * pipe clock rate, i.e. the fast dot clock divided by 5.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	/* Guard against division by zero below. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
632
/* NOTE: this macro hides control flow - it returns false from the caller. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is required except on platforms with a single m divider. */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT limit tables don't populate the combined m/p ranges. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
675
676 static int
677 i9xx_select_p2_div(const struct intel_limit *limit,
678                    const struct intel_crtc_state *crtc_state,
679                    int target)
680 {
681         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
682
683         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
684                 /*
685                  * For LVDS just rely on its current settings for dual-channel.
686                  * We haven't figured out how to reliably set up different
687                  * single/dual channel state, if we even can.
688                  */
689                 if (intel_is_dual_link_lvds(dev_priv))
690                         return limit->p2.p2_fast;
691                 else
692                         return limit->p2.p2_slow;
693         } else {
694                 if (target < limit->p2.dot_limit)
695                         return limit->p2.p2_slow;
696                 else
697                         return limit->p2.p2_fast;
698         }
699 }
700
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;	/* best error seen so far, in kHz */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive search over the m1/m2/n/p1 divider space. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m2 must stay below m1 (see intel_PLL_is_valid) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* keep the candidate closest to target */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one valid candidate improved on the target */
	return (err != target);
}
758
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 *
 * Same search as i9xx_find_best_dpll() but uses the Pineview clock
 * equation and drops the m2 < m1 constraint (m1 is unused on Pineview).
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;	/* best error seen so far, in kHz */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive search over the m1/m2/n/p1 divider space. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* keep the candidate closest to target */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one valid candidate improved on the target */
	return (err != target);
}
814
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * NOTE(review): unlike the other finders, match_clock is accepted but
 * not consulted here - confirm whether that is intentional.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* only consider n values up to
						 * the current best from now on */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
873
874 /*
875  * Check if the calculated PLL configuration is more optimal compared to the
876  * best configuration and error found so far. Return the calculated error.
877  */
878 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
879                                const struct dpll *calculated_clock,
880                                const struct dpll *best_clock,
881                                unsigned int best_error_ppm,
882                                unsigned int *error_ppm)
883 {
884         /*
885          * For CHV ignore the error and consider only the P value.
886          * Prefer a bigger P value based on HW requirements.
887          */
888         if (IS_CHERRYVIEW(to_i915(dev))) {
889                 *error_ppm = 0;
890
891                 return calculated_clock->p > best_clock->p;
892         }
893
894         if (WARN_ON_ONCE(!target_freq))
895                 return false;
896
897         *error_ppm = div_u64(1000000ULL *
898                                 abs(target_freq - calculated_clock->dot),
899                              target_freq);
900         /*
901          * Prefer a better P value over a better (smaller) error if the error
902          * is small. Ensure this preference for future configurations too by
903          * setting the error to 0.
904          */
905         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
906                 *error_ppm = 0;
907
908                 return true;
909         }
910
911         return *error_ppm + 10 < best_error_ppm;
912 }
913
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * NOTE(review): match_clock is accepted but not consulted here - confirm
 * whether that is intentional.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* derive m2 so the resulting dot clock
					 * lands as close to target as possible */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					/* also fills in ppm for this candidate */
					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
973
974 /*
975  * Returns a set of divisors for the desired target clock with the given
976  * refclk, or FALSE.  The returned values represent the clock equation:
977  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
978  */
979 static bool
980 chv_find_best_dpll(const struct intel_limit *limit,
981                    struct intel_crtc_state *crtc_state,
982                    int target, int refclk, struct dpll *match_clock,
983                    struct dpll *best_clock)
984 {
985         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
986         struct drm_device *dev = crtc->base.dev;
987         unsigned int best_error_ppm;
988         struct dpll clock;
989         u64 m2;
990         int found = false;
991
992         memset(best_clock, 0, sizeof(*best_clock));
993         best_error_ppm = 1000000;
994
995         /*
996          * Based on hardware doc, the n always set to 1, and m1 always
997          * set to 2.  If requires to support 200Mhz refclk, we need to
998          * revisit this because n may not 1 anymore.
999          */
1000         clock.n = 1, clock.m1 = 2;
1001         target *= 5;    /* fast clock */
1002
1003         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1004                 for (clock.p2 = limit->p2.p2_fast;
1005                                 clock.p2 >= limit->p2.p2_slow;
1006                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1007                         unsigned int error_ppm;
1008
1009                         clock.p = clock.p1 * clock.p2;
1010
1011                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1012                                                    refclk * clock.m1);
1013
1014                         if (m2 > INT_MAX/clock.m1)
1015                                 continue;
1016
1017                         clock.m2 = m2;
1018
1019                         chv_calc_dpll_params(refclk, &clock);
1020
1021                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1022                                 continue;
1023
1024                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1025                                                 best_error_ppm, &error_ppm))
1026                                 continue;
1027
1028                         *best_clock = clock;
1029                         best_error_ppm = error_ppm;
1030                         found = true;
1031                 }
1032         }
1033
1034         return found;
1035 }
1036
1037 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1038                         struct dpll *best_clock)
1039 {
1040         int refclk = 100000;
1041         const struct intel_limit *limit = &intel_limits_bxt;
1042
1043         return chv_find_best_dpll(limit, crtc_state,
1044                                   crtc_state->port_clock, refclk,
1045                                   NULL, best_clock);
1046 }
1047
/*
 * Report whether the CRTC counts as active: marked active, with a
 * framebuffer bound to the primary plane and a non-zero pixel clock.
 */
bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}
1066
1067 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1068                                              enum pipe pipe)
1069 {
1070         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1071
1072         return crtc->config->cpu_transcoder;
1073 }
1074
/*
 * Sample the pipe's scanline counter (PIPEDSL) twice, 5 ms apart, and
 * report whether it changed - i.e. whether the pipe is actively scanning.
 */
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	/* gen2 uses a narrower scanline field than gen3+ */
	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}
1093
/*
 * Poll until the pipe's scanline counter is moving (@state == true) or
 * stopped (@state == false), logging an error after a 100 ms timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1104
/* Wait until the pipe's scanline counter has stopped advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1109
/* Wait until the pipe's scanline counter has started advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1114
/*
 * Wait for a pipe that is being disabled to actually turn off.
 *
 * Gen4+ exposes a pipe-state bit in PIPECONF that can be polled directly;
 * older hardware is checked indirectly by waiting for the scanline counter
 * to stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1133
/*
 * Only for pre-ILK configs: assert that the pipe's DPLL VCO enable bit
 * matches the expected @state.
 */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1147
/*
 * Assert that the DSI PLL enable bit (read via the CCK sideband) matches
 * the expected @state.
 *
 * XXX: the dsi pll is shared between MIPI DSI ports
 */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1163
/*
 * Assert that the FDI transmitter for the pipe matches the expected
 * @state. DDI platforms have no dedicated FDI_TX register, so the
 * transcoder's DDI function-control enable bit is checked instead.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1185
/*
 * Assert that the FDI receiver enable bit for the pipe matches the
 * expected @state.
 */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1200
/*
 * Assert that the FDI TX PLL for the pipe is enabled. Skipped on gen5
 * (the ILK FDI PLL is always on) and on DDI platforms (the DDI ports own
 * the FDI PLL setup).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1217
/*
 * Assert that the FDI RX PLL enable bit for the pipe matches the
 * expected @state.
 */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1230
/*
 * Assert that the panel power sequencer registers covering @pipe are not
 * write-locked (or that the panel is simply off), so that PLL/pipe writes
 * protected by the panel lock are safe. The PPS control register and the
 * pipe actually driving the panel are looked up per platform.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are not expected to reach this check */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* resolve which pipe drives the panel from the selected port */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* the WARN_ON implies only LVDS panels are expected here */
		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	/* unlocked means panel power off, or the unlock pattern in the regs */
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1287
/*
 * Assert that the pipe enable state matches @state. If the transcoder's
 * power well is down, the pipe is treated as disabled rather than reading
 * unpowered registers.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* power domain off: report the pipe as disabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1316
/*
 * Assert that the plane's hardware enable state (as reported by its
 * get_hw_state vfunc) matches @state.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1331
/* Assert that every plane attached to the crtc's pipe is disabled. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1340
/*
 * Assert that vblank interrupts are off on the crtc: warn when
 * drm_crtc_vblank_get() succeeds (returns 0), then drop the reference it
 * acquired.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1346
/* Assert that the PCH transcoder for @pipe is disabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1359
/*
 * Assert that the PCH DP port is not driven by @pipe, and (on IBX) that a
 * disabled port is not left selecting transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1377
/*
 * Assert that the PCH HDMI/SDVO port is not driven by @pipe, and (on IBX)
 * that a disabled port is not left selecting transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1395
/*
 * Assert that no PCH port (DP, VGA/CRT, LVDS, HDMI/SDVO) is driven by
 * @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1420
/*
 * Low-level VLV PLL enable: write the DPLL control value, wait 150 us,
 * then poll (up to 1 ms) for the lock bit.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1434
/*
 * Enable the VLV DPLL for the crtc. The pipe must be disabled and the
 * panel power sequencer unlocked (the PLL registers are protected by the
 * panel lock). DPLL_MD is programmed even when the VCO stays off.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1452
1453
/*
 * Low-level CHV PLL enable: re-enable the 10-bit clock via a DPIO
 * sideband write, honour the >100ns settling requirement, then enable
 * the PLL and wait for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1483
/*
 * Enable the CHV DPLL for the crtc. Requires a disabled pipe and an
 * unlocked panel. Pipes B/C program DPLL_MD indirectly through chicken
 * bits (WaPixelRepeatModeFixForC0).
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* cache the value since DPLL_MD can't be read back here */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1520
1521 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1522 {
1523         if (IS_I830(dev_priv))
1524                 return false;
1525
1526         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1527 }
1528
/*
 * Enable an i9xx-style DPLL. VGA mode must be enabled while the dividers
 * are programmed, the (pre-gen4) pixel multiplier can only take effect
 * once the clocks are stable, and the final value is rewritten three
 * times "for luck".
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1574
/*
 * Disable the DPLL on gmch-style platforms, leaving only VGA mode
 * disable set. The pipe must already be off.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1591
/*
 * Disable the DPLL on Valleyview. The reference clock stays enabled,
 * and on pipes other than A the CRI clock is kept running as well
 * (presumably needed by the PHY — see the matching CHV code).
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1607
/*
 * Disable the DPLL on Cherryview: turn off the PLL itself (keeping the
 * SSC reference clock, and the CRI clock on pipes other than A), then
 * gate the 10-bit clock to the display controller via sideband DPIO.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1633
/*
 * Wait (up to 1ms) for a VLV/CHV digital port to report ready in the
 * appropriate status register. Ports B/C report through DPLL(0),
 * port D through DPIO_PHY_STATUS. Only warns on timeout; does not
 * return an error to the caller.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C's ready bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1665
/*
 * Enable the PCH transcoder for @crtc_state on IBX/CPT. The shared
 * DPLL and both FDI directions must already be up. Interlace mode and
 * (on IBX) BPC are copied from the CPU pipe's PIPECONF so both ends of
 * the FDI link agree.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	/* wait up to 100ms for the transcoder to report enabled */
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1731
/*
 * Enable the single LPT PCH transcoder (hard-wired to pipe A's FDI).
 * Interlace mode is copied from the CPU transcoder's PIPECONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	/* wait up to 100ms for the transcoder to report enabled */
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1763
/*
 * Disable the PCH transcoder for @pipe on IBX/CPT. FDI and the PCH
 * ports must already be off. On CPT the timing-override chicken bit
 * set at enable time is cleared again afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1793
/*
 * Disable the single LPT PCH transcoder and clear the timing-override
 * chicken bit that was set when it was enabled.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1811
1812 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1813 {
1814         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1815
1816         if (HAS_PCH_LPT(dev_priv))
1817                 return PIPE_A;
1818         else
1819                 return crtc->pipe;
1820 }
1821
1822 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1823 {
1824         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1825
1826         /*
1827          * On i965gm the hardware frame counter reads
1828          * zero when the TV encoder is enabled :(
1829          */
1830         if (IS_I965GM(dev_priv) &&
1831             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1832                 return 0;
1833
1834         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1835                 return 0xffffffff; /* full 32 bit counter */
1836         else if (INTEL_GEN(dev_priv) >= 3)
1837                 return 0xffffff; /* only 24 bits of frame count */
1838         else
1839                 return 0; /* Gen2 doesn't have a hardware frame counter */
1840 }
1841
/*
 * Hand vblank handling over to drm: set the max frame-counter value
 * for this configuration, then enable vblank interrupts on the crtc.
 */
static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	/* must be set before drm_crtc_vblank_on() starts using the counter */
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
1851
/* Disable vblank handling on the crtc and sanity-check it stuck. */
static void intel_crtc_vblank_off(struct intel_crtc *crtc)
{
	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
1857
/*
 * Enable the pipe for @new_crtc_state. All planes must be disabled and
 * the clock source (DPLL / DSI PLL / FDI) already running. On i830 the
 * pipe may already be enabled, which is tolerated.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1915
/*
 * Disable the pipe for @old_crtc_state and wait for it to stop.
 * All planes must already be off. On i830 the pipe itself is kept
 * enabled (only double-wide is cleared); on a pipe that is already
 * off this is a no-op.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* only wait for off when we actually turned the pipe off */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1955
/* GTT tile size in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1960
1961 static unsigned int
1962 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1963 {
1964         struct drm_i915_private *dev_priv = to_i915(fb->dev);
1965         unsigned int cpp = fb->format->cpp[color_plane];
1966
1967         switch (fb->modifier) {
1968         case DRM_FORMAT_MOD_LINEAR:
1969                 return intel_tile_size(dev_priv);
1970         case I915_FORMAT_MOD_X_TILED:
1971                 if (IS_GEN(dev_priv, 2))
1972                         return 128;
1973                 else
1974                         return 512;
1975         case I915_FORMAT_MOD_Y_TILED_CCS:
1976                 if (color_plane == 1)
1977                         return 128;
1978                 /* fall through */
1979         case I915_FORMAT_MOD_Y_TILED:
1980                 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1981                         return 128;
1982                 else
1983                         return 512;
1984         case I915_FORMAT_MOD_Yf_TILED_CCS:
1985                 if (color_plane == 1)
1986                         return 128;
1987                 /* fall through */
1988         case I915_FORMAT_MOD_Yf_TILED:
1989                 switch (cpp) {
1990                 case 1:
1991                         return 64;
1992                 case 2:
1993                 case 4:
1994                         return 128;
1995                 case 8:
1996                 case 16:
1997                         return 256;
1998                 default:
1999                         MISSING_CASE(cpp);
2000                         return cpp;
2001                 }
2002                 break;
2003         default:
2004                 MISSING_CASE(fb->modifier);
2005                 return cpp;
2006         }
2007 }
2008
2009 static unsigned int
2010 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2011 {
2012         return intel_tile_size(to_i915(fb->dev)) /
2013                 intel_tile_width_bytes(fb, color_plane);
2014 }
2015
2016 /* Return the tile dimensions in pixel units */
2017 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2018                             unsigned int *tile_width,
2019                             unsigned int *tile_height)
2020 {
2021         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2022         unsigned int cpp = fb->format->cpp[color_plane];
2023
2024         *tile_width = tile_width_bytes / cpp;
2025         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
2026 }
2027
/* Round @height up to a whole number of tile rows for this fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2036
2037 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2038 {
2039         unsigned int size = 0;
2040         int i;
2041
2042         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2043                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2044
2045         return size;
2046 }
2047
2048 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2049 {
2050         unsigned int size = 0;
2051         int i;
2052
2053         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2054                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2055
2056         return size;
2057 }
2058
2059 static void
2060 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2061                         const struct drm_framebuffer *fb,
2062                         unsigned int rotation)
2063 {
2064         view->type = I915_GGTT_VIEW_NORMAL;
2065         if (drm_rotation_90_or_270(rotation)) {
2066                 view->type = I915_GGTT_VIEW_ROTATED;
2067                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2068         }
2069 }
2070
/* GGTT alignment required for a cursor surface on this platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2082
/* GGTT alignment required for a linear scanout surface on this platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2095
2096 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2097                                          int color_plane)
2098 {
2099         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2100
2101         /* AUX_DIST needs only 4K alignment */
2102         if (color_plane == 1)
2103                 return 4096;
2104
2105         switch (fb->modifier) {
2106         case DRM_FORMAT_MOD_LINEAR:
2107                 return intel_linear_alignment(dev_priv);
2108         case I915_FORMAT_MOD_X_TILED:
2109                 if (INTEL_GEN(dev_priv) >= 9)
2110                         return 256 * 1024;
2111                 return 0;
2112         case I915_FORMAT_MOD_Y_TILED_CCS:
2113         case I915_FORMAT_MOD_Yf_TILED_CCS:
2114         case I915_FORMAT_MOD_Y_TILED:
2115         case I915_FORMAT_MOD_Yf_TILED:
2116                 return 1 * 1024 * 1024;
2117         default:
2118                 MISSING_CASE(fb->modifier);
2119                 return 0;
2120         }
2121 }
2122
2123 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2124 {
2125         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2126         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2127
2128         return INTEL_GEN(dev_priv) < 4 ||
2129                 (plane->has_fbc &&
2130                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2131 }
2132
/*
 * Pin a framebuffer's backing object into the GGTT for scanout and,
 * when requested and possible, attach a fence register for tiled
 * scan-out (PLANE_HAS_FENCE is then set in *out_flags).
 *
 * Returns the pinned vma with an extra reference taken — released by
 * intel_unpin_fb_vma() — or an ERR_PTR on failure.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 cannot scan out tiled without a fence */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* extra reference for the caller, dropped in intel_unpin_fb_vma() */
	i915_vma_get(vma);
err:
	/* reached on both success and failure paths */
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2225
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if
 * PLANE_HAS_FENCE was set) and the display-plane pin while holding
 * the object lock, then drop the vma reference taken at pin time.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	/* drop the reference taken by intel_pin_and_fence_fb_obj() */
	i915_vma_put(vma);
}
2236
2237 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2238                           unsigned int rotation)
2239 {
2240         if (drm_rotation_90_or_270(rotation))
2241                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2242         else
2243                 return fb->pitches[color_plane];
2244 }
2245
2246 /*
2247  * Convert the x/y offsets into a linear offset.
2248  * Only valid with 0/180 degree rotation, which is fine since linear
2249  * offset is only used with linear buffers on pre-hsw and tiled buffers
2250  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2251  */
2252 u32 intel_fb_xy_to_linear(int x, int y,
2253                           const struct intel_plane_state *state,
2254                           int color_plane)
2255 {
2256         const struct drm_framebuffer *fb = state->hw.fb;
2257         unsigned int cpp = fb->format->cpp[color_plane];
2258         unsigned int pitch = state->color_plane[color_plane].stride;
2259
2260         return y * pitch + x * cpp;
2261 }
2262
2263 /*
2264  * Add the x/y offsets derived from fb->offsets[] to the user
2265  * specified plane src x/y offsets. The resulting x/y offsets
2266  * specify the start of scanout from the beginning of the gtt mapping.
2267  */
2268 void intel_add_fb_offsets(int *x, int *y,
2269                           const struct intel_plane_state *state,
2270                           int color_plane)
2271
2272 {
2273         *x += state->color_plane[color_plane].x;
2274         *y += state->color_plane[color_plane].y;
2275 }
2276
/*
 * Rebase a tile-aligned offset from @old_offset to @new_offset by
 * folding the difference (a whole number of tiles) into the x/y
 * coordinates. Both offsets must be tile aligned and new_offset must
 * not exceed old_offset. Returns @new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	/* number of whole tiles between the two offsets */
	tiles = (old_offset - new_offset) / tile_size;

	/* distribute them into full rows plus a remainder column */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2303
/*
 * Tell whether the given fb plane is stored linearly (not tiled).
 * @color_plane is currently unused: only the modifier decides.
 */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2308
/*
 * Rebase an aligned surface offset from @old_offset to @new_offset,
 * moving the difference into the x/y coordinates. For tiled surfaces
 * the delta is redistributed in whole tiles; for linear surfaces in
 * rows and pixels. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile rows, tiles are transposed */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* linear: convert back to an absolute byte offset and re-split */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2347
2348 /*
2349  * Adjust the tile offset by moving the difference into
2350  * the x/y offsets.
2351  */
2352 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2353                                              const struct intel_plane_state *state,
2354                                              int color_plane,
2355                                              u32 old_offset, u32 new_offset)
2356 {
2357         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2358                                            state->hw.rotation,
2359                                            state->color_plane[color_plane].stride,
2360                                            old_offset, new_offset);
2361 }
2362
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
                                        int *x, int *y,
                                        const struct drm_framebuffer *fb,
                                        int color_plane,
                                        unsigned int pitch,
                                        unsigned int rotation,
                                        u32 alignment)
{
        unsigned int cpp = fb->format->cpp[color_plane];
        u32 offset, offset_aligned;

        /* Turn the power-of-two alignment into a bitmask (0 stays 0). */
        if (alignment)
                alignment--;

        if (!is_surface_linear(fb->modifier, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int tile_rows, tiles, pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        /* In the rotated view pitch is in units of tile_height */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                /* Reduce x/y to intra-tile coordinates... */
                tile_rows = *y / tile_height;
                *y %= tile_height;

                tiles = *x / tile_width;
                *x %= tile_width;

                /* ...and fold the whole tiles into the byte offset. */
                offset = (tile_rows * pitch_tiles + tiles) * tile_size;
                offset_aligned = offset & ~alignment;

                /* Push the above-alignment remainder back into x/y. */
                intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         offset, offset_aligned);
        } else {
                offset = *y * pitch + *x * cpp;
                offset_aligned = offset & ~alignment;

                /* The sub-alignment remainder becomes the new x/y offsets. */
                *y = (offset & alignment) / pitch;
                *x = ((offset & alignment) - *y * pitch) / cpp;
        }

        return offset_aligned;
}
2427
2428 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2429                                               const struct intel_plane_state *state,
2430                                               int color_plane)
2431 {
2432         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2433         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2434         const struct drm_framebuffer *fb = state->hw.fb;
2435         unsigned int rotation = state->hw.rotation;
2436         int pitch = state->color_plane[color_plane].stride;
2437         u32 alignment;
2438
2439         if (intel_plane->id == PLANE_CURSOR)
2440                 alignment = intel_cursor_alignment(dev_priv);
2441         else
2442                 alignment = intel_surf_alignment(fb, color_plane);
2443
2444         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2445                                             pitch, rotation, alignment);
2446 }
2447
2448 /* Convert the fb->offset[] into x/y offsets */
2449 static int intel_fb_offset_to_xy(int *x, int *y,
2450                                  const struct drm_framebuffer *fb,
2451                                  int color_plane)
2452 {
2453         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2454         unsigned int height;
2455
2456         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2457             fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2458                 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2459                               fb->offsets[color_plane], color_plane);
2460                 return -EINVAL;
2461         }
2462
2463         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2464         height = ALIGN(height, intel_tile_height(fb, color_plane));
2465
2466         /* Catch potential overflows early */
2467         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2468                             fb->offsets[color_plane])) {
2469                 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2470                               fb->offsets[color_plane], fb->pitches[color_plane],
2471                               color_plane);
2472                 return -ERANGE;
2473         }
2474
2475         *x = 0;
2476         *y = 0;
2477
2478         intel_adjust_aligned_offset(x, y,
2479                                     fb, color_plane, DRM_MODE_ROTATE_0,
2480                                     fb->pitches[color_plane],
2481                                     fb->offsets[color_plane], 0);
2482
2483         return 0;
2484 }
2485
2486 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2487 {
2488         switch (fb_modifier) {
2489         case I915_FORMAT_MOD_X_TILED:
2490                 return I915_TILING_X;
2491         case I915_FORMAT_MOD_Y_TILED:
2492         case I915_FORMAT_MOD_Y_TILED_CCS:
2493                 return I915_TILING_Y;
2494         default:
2495                 return I915_TILING_NONE;
2496         }
2497 }
2498
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
/*
 * Format descriptions for CCS framebuffers: plane 0 is the 4 bpp main
 * surface, plane 1 the 1 bpp CCS, with hsub=8/vsub=16 encoding the
 * one-CCS-byte-per-8x16-pixels ratio derived above.
 */
static const struct drm_format_info ccs_formats[] = {
        { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
        { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2523
2524 static const struct drm_format_info *
2525 lookup_format_info(const struct drm_format_info formats[],
2526                    int num_formats, u32 format)
2527 {
2528         int i;
2529
2530         for (i = 0; i < num_formats; i++) {
2531                 if (formats[i].format == format)
2532                         return &formats[i];
2533         }
2534
2535         return NULL;
2536 }
2537
2538 static const struct drm_format_info *
2539 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2540 {
2541         switch (cmd->modifier[0]) {
2542         case I915_FORMAT_MOD_Y_TILED_CCS:
2543         case I915_FORMAT_MOD_Yf_TILED_CCS:
2544                 return lookup_format_info(ccs_formats,
2545                                           ARRAY_SIZE(ccs_formats),
2546                                           cmd->pixel_format);
2547         default:
2548                 return NULL;
2549         }
2550 }
2551
2552 bool is_ccs_modifier(u64 modifier)
2553 {
2554         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2555                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2556 }
2557
2558 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2559                               u32 pixel_format, u64 modifier)
2560 {
2561         struct intel_crtc *crtc;
2562         struct intel_plane *plane;
2563
2564         /*
2565          * We assume the primary plane for pipe A has
2566          * the highest stride limits of them all.
2567          */
2568         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2569         if (!crtc)
2570                 return 0;
2571
2572         plane = to_intel_plane(crtc->base.primary);
2573
2574         return plane->max_stride(plane, pixel_format, modifier,
2575                                  DRM_MODE_ROTATE_0);
2576 }
2577
2578 static
2579 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2580                         u32 pixel_format, u64 modifier)
2581 {
2582         /*
2583          * Arbitrary limit for gen4+ chosen to match the
2584          * render engine max stride.
2585          *
2586          * The new CCS hash mode makes remapping impossible
2587          */
2588         if (!is_ccs_modifier(modifier)) {
2589                 if (INTEL_GEN(dev_priv) >= 7)
2590                         return 256*1024;
2591                 else if (INTEL_GEN(dev_priv) >= 4)
2592                         return 128*1024;
2593         }
2594
2595         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2596 }
2597
2598 static u32
2599 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2600 {
2601         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2602
2603         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2604                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2605                                                            fb->format->format,
2606                                                            fb->modifier);
2607
2608                 /*
2609                  * To make remapping with linear generally feasible
2610                  * we need the stride to be page aligned.
2611                  */
2612                 if (fb->pitches[color_plane] > max_stride)
2613                         return intel_tile_size(dev_priv);
2614                 else
2615                         return 64;
2616         } else {
2617                 return intel_tile_width_bytes(fb, color_plane);
2618         }
2619 }
2620
2621 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2622 {
2623         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2624         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2625         const struct drm_framebuffer *fb = plane_state->hw.fb;
2626         int i;
2627
2628         /* We don't want to deal with remapping with cursors */
2629         if (plane->id == PLANE_CURSOR)
2630                 return false;
2631
2632         /*
2633          * The display engine limits already match/exceed the
2634          * render engine limits, so not much point in remapping.
2635          * Would also need to deal with the fence POT alignment
2636          * and gen2 2KiB GTT tile size.
2637          */
2638         if (INTEL_GEN(dev_priv) < 4)
2639                 return false;
2640
2641         /*
2642          * The new CCS hash mode isn't compatible with remapping as
2643          * the virtual address of the pages affects the compressed data.
2644          */
2645         if (is_ccs_modifier(fb->modifier))
2646                 return false;
2647
2648         /* Linear needs a page aligned stride for remapping */
2649         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2650                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2651
2652                 for (i = 0; i < fb->format->num_planes; i++) {
2653                         if (fb->pitches[i] & alignment)
2654                                 return false;
2655                 }
2656         }
2657
2658         return true;
2659 }
2660
2661 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2662 {
2663         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2664         const struct drm_framebuffer *fb = plane_state->hw.fb;
2665         unsigned int rotation = plane_state->hw.rotation;
2666         u32 stride, max_stride;
2667
2668         /*
2669          * No remapping for invisible planes since we don't have
2670          * an actual source viewport to remap.
2671          */
2672         if (!plane_state->uapi.visible)
2673                 return false;
2674
2675         if (!intel_plane_can_remap(plane_state))
2676                 return false;
2677
2678         /*
2679          * FIXME: aux plane limits on gen9+ are
2680          * unclear in Bspec, for now no checking.
2681          */
2682         stride = intel_fb_pitch(fb, 0, rotation);
2683         max_stride = plane->max_stride(plane, fb->format->format,
2684                                        fb->modifier, rotation);
2685
2686         return stride > max_stride;
2687 }
2688
/*
 * Precompute the per-color-plane layout for an fb: the normal-view and
 * 90/270-rotated-view x/y offsets (intel_fb->normal[]/rotated[]) and the
 * rotation_info used to build the rotated GTT view, and validate the fb
 * offsets/pitches against the size of the backing object.
 *
 * Returns 0 on success, or a negative error code for a malformed fb.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;
        unsigned int max_size = 0;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                cpp = fb->format->cpp[i];
                width = drm_framebuffer_plane_width(fb->width, fb, i);
                height = drm_framebuffer_plane_height(fb->height, fb, i);

                /* Translate the byte offset of this plane into x/y pixels. */
                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return ret;
                }

                if (is_ccs_modifier(fb->modifier) && i == 1) {
                        int hsub = fb->format->hsub;
                        int vsub = fb->format->vsub;
                        int tile_width, tile_height;
                        int main_x, main_y;
                        int ccs_x, ccs_y;

                        /* Scale the CCS tile up to main-surface pixel units. */
                        intel_tile_dims(fb, i, &tile_width, &tile_height);
                        tile_width *= hsub;
                        tile_height *= vsub;

                        ccs_x = (x * hsub) % tile_width;
                        ccs_y = (y * vsub) % tile_height;
                        main_x = intel_fb->normal[0].x % tile_width;
                        main_y = intel_fb->normal[0].y % tile_height;

                        /*
                         * CCS doesn't have its own x/y offset register, so the intra CCS tile
                         * x/y offsets must match between CCS and the main surface.
                         */
                        if (main_x != ccs_x || main_y != ccs_y) {
                                DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                                              main_x, main_y,
                                              ccs_x, ccs_y,
                                              intel_fb->normal[0].x,
                                              intel_fb->normal[0].y,
                                              x, y);
                                return -EINVAL;
                        }
                }

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                /* Tile-align the offset; the remainder stays in x/y. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
                                                      fb->pitches[i],
                                                      DRM_MODE_ROTATE_0,
                                                      tile_size);
                offset /= tile_size;    /* offset is in whole tiles from here on */

                if (!is_surface_linear(fb->modifier, i)) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        rot_info->plane[i].offset = offset;
                        rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
                        rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                        rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                        intel_fb->rotated[i].pitch =
                                rot_info->plane[i].height * tile_height;

                        /* how many tiles does this plane need */
                        size = rot_info->plane[i].stride * rot_info->plane[i].height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        /* rotate the x/y offsets to match the GTT view */
                        drm_rect_init(&r, x, y, width, height);
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotate the tile dimensions to match the GTT view */
                        pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
                        swap(tile_width, tile_height);

                        /*
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
                        intel_adjust_tile_offset(&x, &y,
                                                 tile_width, tile_height,
                                                 tile_size, pitch_tiles,
                                                 gtt_offset_rotated * tile_size, 0);

                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

                        /*
                         * First pixel of the framebuffer from
                         * the start of the rotated gtt mapping.
                         */
                        intel_fb->rotated[i].x = x;
                        intel_fb->rotated[i].y = y;
                } else {
                        /* Linear: tiles needed to cover up to the plane's last byte. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
                              mul_u32_u32(max_size, tile_size), obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2850
/*
 * Switch the plane to a remapped/rotated GGTT view and recompute the
 * per-color-plane stride and x/y offsets accordingly. Also rewrites
 * plane_state->uapi.src to be relative to the (possibly rotated)
 * viewport.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        struct drm_framebuffer *fb = plane_state->hw.fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *info = &plane_state->view.rotated;
        unsigned int rotation = plane_state->hw.rotation;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);
        unsigned int src_x, src_y;
        unsigned int src_w, src_h;
        u32 gtt_offset = 0;

        memset(&plane_state->view, 0, sizeof(plane_state->view));
        plane_state->view.type = drm_rotation_90_or_270(rotation) ?
                I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

        /* src coordinates are in 16.16 fixed point */
        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;
        src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
        src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

        /* CCS fbs are rejected by intel_plane_can_remap() */
        WARN_ON(is_ccs_modifier(fb->modifier));

        /* Make src coordinates relative to the viewport */
        drm_rect_translate(&plane_state->uapi.src,
                           -(src_x << 16), -(src_y << 16));

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->uapi.src,
                                src_w << 16, src_h << 16,
                                DRM_MODE_ROTATE_270);

        for (i = 0; i < num_planes; i++) {
                /* Subsampling factors are forced to 1 for color plane 0. */
                unsigned int hsub = i ? fb->format->hsub : 1;
                unsigned int vsub = i ? fb->format->vsub : 1;
                unsigned int cpp = fb->format->cpp[i];
                unsigned int tile_width, tile_height;
                unsigned int width, height;
                unsigned int pitch_tiles;
                unsigned int x, y;
                u32 offset;

                intel_tile_dims(fb, i, &tile_width, &tile_height);

                x = src_x / hsub;
                y = src_y / vsub;
                width = src_w / hsub;
                height = src_h / vsub;

                /*
                 * First pixel of the src viewport from the
                 * start of the normal gtt mapping.
                 */
                x += intel_fb->normal[i].x;
                y += intel_fb->normal[i].y;

                /* Tile-align the offset; the remainder stays in x/y. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y,
                                                      fb, i, fb->pitches[i],
                                                      DRM_MODE_ROTATE_0, tile_size);
                offset /= tile_size;

                info->plane[i].offset = offset;
                info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
                                                     tile_width * cpp);
                info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        struct drm_rect r;

                        /* rotate the x/y offsets to match the GTT view */
                        drm_rect_init(&r, x, y, width, height);
                        drm_rect_rotate(&r,
                                        info->plane[i].width * tile_width,
                                        info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        pitch_tiles = info->plane[i].height;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_height;

                        /* rotate the tile dimensions to match the GTT view */
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = info->plane[i].width;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
                }

                /*
                 * We only keep the x/y offsets, so push all of the
                 * gtt offset into the x/y offsets.
                 */
                intel_adjust_tile_offset(&x, &y,
                                         tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         gtt_offset * tile_size, 0);

                gtt_offset += info->plane[i].width * info->plane[i].height;

                plane_state->color_plane[i].offset = 0;
                plane_state->color_plane[i].x = x;
                plane_state->color_plane[i].y = y;
        }
}
2960
/*
 * Compute the GGTT view and the per-color-plane stride/offset/x/y for
 * this plane state, remapping the fb through a rotated/remapped view
 * when the normal view would exceed the plane's stride limits.
 *
 * Returns 0 on success or a negative error code when the stride
 * limits cannot be satisfied.
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
        const struct intel_framebuffer *fb =
                to_intel_framebuffer(plane_state->hw.fb);
        unsigned int rotation = plane_state->hw.rotation;
        int i, num_planes;

        if (!fb)
                return 0;

        num_planes = fb->base.format->num_planes;

        if (intel_plane_needs_remap(plane_state)) {
                intel_plane_remap_gtt(plane_state);

                /*
                 * Sometimes even remapping can't overcome
                 * the stride limitations :( Can happen with
                 * big plane sizes and suitably misaligned
                 * offsets.
                 */
                return intel_plane_check_stride(plane_state);
        }

        /* Normal (non-remapped) view: use the precomputed fb layout. */
        intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

        for (i = 0; i < num_planes; i++) {
                plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
                plane_state->color_plane[i].offset = 0;

                if (drm_rotation_90_or_270(rotation)) {
                        plane_state->color_plane[i].x = fb->rotated[i].x;
                        plane_state->color_plane[i].y = fb->rotated[i].y;
                } else {
                        plane_state->color_plane[i].x = fb->normal[i].x;
                        plane_state->color_plane[i].y = fb->normal[i].y;
                }
        }

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->uapi.src,
                                fb->base.width << 16, fb->base.height << 16,
                                DRM_MODE_ROTATE_270);

        return intel_plane_check_stride(plane_state);
}
3009
3010 static int i9xx_format_to_fourcc(int format)
3011 {
3012         switch (format) {
3013         case DISPPLANE_8BPP:
3014                 return DRM_FORMAT_C8;
3015         case DISPPLANE_BGRA555:
3016                 return DRM_FORMAT_ARGB1555;
3017         case DISPPLANE_BGRX555:
3018                 return DRM_FORMAT_XRGB1555;
3019         case DISPPLANE_BGRX565:
3020                 return DRM_FORMAT_RGB565;
3021         default:
3022         case DISPPLANE_BGRX888:
3023                 return DRM_FORMAT_XRGB8888;
3024         case DISPPLANE_RGBX888:
3025                 return DRM_FORMAT_XBGR8888;
3026         case DISPPLANE_BGRA888:
3027                 return DRM_FORMAT_ARGB8888;
3028         case DISPPLANE_RGBA888:
3029                 return DRM_FORMAT_ABGR8888;
3030         case DISPPLANE_BGRX101010:
3031                 return DRM_FORMAT_XRGB2101010;
3032         case DISPPLANE_RGBX101010:
3033                 return DRM_FORMAT_XBGR2101010;
3034         case DISPPLANE_BGRA101010:
3035                 return DRM_FORMAT_ARGB2101010;
3036         case DISPPLANE_RGBA101010:
3037                 return DRM_FORMAT_ABGR2101010;
3038         case DISPPLANE_RGBX161616:
3039                 return DRM_FORMAT_XBGR16161616F;
3040         }
3041 }
3042
3043 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3044 {
3045         switch (format) {
3046         case PLANE_CTL_FORMAT_RGB_565:
3047                 return DRM_FORMAT_RGB565;
3048         case PLANE_CTL_FORMAT_NV12:
3049                 return DRM_FORMAT_NV12;
3050         case PLANE_CTL_FORMAT_P010:
3051                 return DRM_FORMAT_P010;
3052         case PLANE_CTL_FORMAT_P012:
3053                 return DRM_FORMAT_P012;
3054         case PLANE_CTL_FORMAT_P016:
3055                 return DRM_FORMAT_P016;
3056         case PLANE_CTL_FORMAT_Y210:
3057                 return DRM_FORMAT_Y210;
3058         case PLANE_CTL_FORMAT_Y212:
3059                 return DRM_FORMAT_Y212;
3060         case PLANE_CTL_FORMAT_Y216:
3061                 return DRM_FORMAT_Y216;
3062         case PLANE_CTL_FORMAT_Y410:
3063                 return DRM_FORMAT_XVYU2101010;
3064         case PLANE_CTL_FORMAT_Y412:
3065                 return DRM_FORMAT_XVYU12_16161616;
3066         case PLANE_CTL_FORMAT_Y416:
3067                 return DRM_FORMAT_XVYU16161616;
3068         default:
3069         case PLANE_CTL_FORMAT_XRGB_8888:
3070                 if (rgb_order) {
3071                         if (alpha)
3072                                 return DRM_FORMAT_ABGR8888;
3073                         else
3074                                 return DRM_FORMAT_XBGR8888;
3075                 } else {
3076                         if (alpha)
3077                                 return DRM_FORMAT_ARGB8888;
3078                         else
3079                                 return DRM_FORMAT_XRGB8888;
3080                 }
3081         case PLANE_CTL_FORMAT_XRGB_2101010:
3082                 if (rgb_order) {
3083                         if (alpha)
3084                                 return DRM_FORMAT_ABGR2101010;
3085                         else
3086                                 return DRM_FORMAT_XBGR2101010;
3087                 } else {
3088                         if (alpha)
3089                                 return DRM_FORMAT_ARGB2101010;
3090                         else
3091                                 return DRM_FORMAT_XRGB2101010;
3092                 }
3093         case PLANE_CTL_FORMAT_XRGB_16161616F:
3094                 if (rgb_order) {
3095                         if (alpha)
3096                                 return DRM_FORMAT_ABGR16161616F;
3097                         else
3098                                 return DRM_FORMAT_XBGR16161616F;
3099                 } else {
3100                         if (alpha)
3101                                 return DRM_FORMAT_ARGB16161616F;
3102                         else
3103                                 return DRM_FORMAT_XRGB16161616F;
3104                 }
3105         }
3106 }
3107
/*
 * Try to wrap the firmware-provided (pre-allocated, stolen-memory)
 * framebuffer in a GEM object so the initial plane config can keep
 * scanning out of it without a modeset.
 *
 * Returns true if plane_config->fb now describes a usable framebuffer,
 * false if the BIOS FB could not be reconstructed.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* The BIOS FB need not be page aligned; cover full pages. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	/* Only reconstruct modifiers the pre-OS code could have used. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
				 fb->modifier);
		return false;
	}

	/* Wrap the already-populated stolen range in a GEM object. */
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (IS_ERR(obj))
		return false;

	/* Propagate the BIOS tiling/stride onto the object. */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto out;
	}

	/* Mirror the fb parameters into a mode_cmd for framebuffer init. */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	ret = true;
out:
	/*
	 * Drop our reference in both the success and failure paths;
	 * on success the framebuffer presumably holds its own reference
	 * to the object (via intel_framebuffer_init) — NOTE(review):
	 * confirm against intel_framebuffer_init.
	 */
	i915_gem_object_put(obj);
	return ret;
}
3182
3183 static void
3184 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3185                         struct intel_plane_state *plane_state,
3186                         bool visible)
3187 {
3188         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3189
3190         plane_state->uapi.visible = visible;
3191
3192         if (visible)
3193                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3194         else
3195                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3196 }
3197
3198 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3199 {
3200         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3201         struct drm_plane *plane;
3202
3203         /*
3204          * Active_planes aliases if multiple "primary" or cursor planes
3205          * have been used on the same (or wrong) pipe. plane_mask uses
3206          * unique ids, hence we can use that to reconstruct active_planes.
3207          */
3208         crtc_state->active_planes = 0;
3209
3210         drm_for_each_plane_mask(plane, &dev_priv->drm,
3211                                 crtc_state->uapi.plane_mask)
3212                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3213 }
3214
/*
 * Disable a plane immediately and fix up the current (already
 * committed) crtc/plane software state to match, without going
 * through a full atomic commit.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	/* Remove the plane from the software bookkeeping. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	/* The plane no longer contributes bandwidth/cdclk demand. */
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* NOTE(review): IPS appears tied to the primary plane; see hsw_disable_ips(). */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
3258
3259 static struct intel_frontbuffer *
3260 to_intel_frontbuffer(struct drm_framebuffer *fb)
3261 {
3262         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3263 }
3264
/*
 * Take over the firmware-configured framebuffer for this crtc's primary
 * plane: first try to reconstruct it from stolen memory, then fall back
 * to sharing an fb already claimed by another active crtc at the same
 * GGTT offset, and finally — if neither works — disable the plane so
 * software state doesn't claim a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* Reconstruction failed; the fb struct is ours to free. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same scanout address => same BIOS framebuffer. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Pin the fb so it can't be moved while being scanned out. */
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* Full-fb source rect; src coords are 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	/* Mark the plane as busy for frontbuffer tracking. */
	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3370
3371 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3372                                int color_plane,
3373                                unsigned int rotation)
3374 {
3375         int cpp = fb->format->cpp[color_plane];
3376
3377         switch (fb->modifier) {
3378         case DRM_FORMAT_MOD_LINEAR:
3379         case I915_FORMAT_MOD_X_TILED:
3380                 /*
3381                  * Validated limit is 4k, but has 5k should
3382                  * work apart from the following features:
3383                  * - Ytile (already limited to 4k)
3384                  * - FP16 (already limited to 4k)
3385                  * - render compression (already limited to 4k)
3386                  * - KVMR sprite and cursor (don't care)
3387                  * - horizontal panning (TODO verify this)
3388                  * - pipe and plane scaling (TODO verify this)
3389                  */
3390                 if (cpp == 8)
3391                         return 4096;
3392                 else
3393                         return 5120;
3394         case I915_FORMAT_MOD_Y_TILED_CCS:
3395         case I915_FORMAT_MOD_Yf_TILED_CCS:
3396                 /* FIXME AUX plane? */
3397         case I915_FORMAT_MOD_Y_TILED:
3398         case I915_FORMAT_MOD_Yf_TILED:
3399                 if (cpp == 8)
3400                         return 2048;
3401                 else
3402                         return 4096;
3403         default:
3404                 MISSING_CASE(fb->modifier);
3405                 return 2048;
3406         }
3407 }
3408
3409 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3410                                int color_plane,
3411                                unsigned int rotation)
3412 {
3413         int cpp = fb->format->cpp[color_plane];
3414
3415         switch (fb->modifier) {
3416         case DRM_FORMAT_MOD_LINEAR:
3417         case I915_FORMAT_MOD_X_TILED:
3418                 if (cpp == 8)
3419                         return 4096;
3420                 else
3421                         return 5120;
3422         case I915_FORMAT_MOD_Y_TILED_CCS:
3423         case I915_FORMAT_MOD_Yf_TILED_CCS:
3424                 /* FIXME AUX plane? */
3425         case I915_FORMAT_MOD_Y_TILED:
3426         case I915_FORMAT_MOD_Yf_TILED:
3427                 if (cpp == 8)
3428                         return 2048;
3429                 else
3430                         return 5120;
3431         default:
3432                 MISSING_CASE(fb->modifier);
3433                 return 2048;
3434         }
3435 }
3436
/* Max source width (pixels) of an ICL+ plane: 5120 for every modifier. */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3443
/* Max source height (pixels) of a pre-ICL plane. */
static int skl_max_plane_height(void)
{
	const int max_height = 4096;

	return max_height;
}
3448
/* Max source height (pixels) of an ICL+ plane. */
static int icl_max_plane_height(void)
{
	const int max_height = 4320;

	return max_height;
}
3453
/*
 * Try to make the CCS AUX surface x/y coordinates coincide with the
 * main surface x/y for the given main surface offset (the AUX surface
 * has no independent x/y offsets — see the caller's comment).
 *
 * Walks the AUX offset downwards one alignment step at a time, which
 * shifts the implied AUX x/y, until the coordinates match or no further
 * adjustment is possible.
 *
 * Returns true and commits the new AUX offset/x/y to color_plane[1] on
 * success, false if the coordinates could not be made to match.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	/* Subsampling factors between the main and AUX color planes. */
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	/* AUX offset must stay >= main offset, and y may only grow. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Adjust in subsampled coordinates... */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		/* ...then convert back, preserving the sub-sample remainder. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
3491
/*
 * Compute the final main (Y/RGB) surface offset and x/y for a SKL+
 * plane, honouring the per-platform size limits, the AUX-offset
 * ordering constraint, the X-tiling stride restriction, and (for CCS)
 * the requirement that main and AUX coordinates match.
 *
 * On success, stores the result in color_plane[0], translates
 * uapi.src to the final coordinates, and returns 0; returns -EINVAL
 * if no acceptable offset exists.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	/* src coords are 16.16 fixed point; convert to whole pixels. */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	/* Pick the platform-specific source size limits. */
	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Lower the offset (raising x) until x + w fits the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
3588
/*
 * Compute the offset/x/y of the CbCr (AUX) surface of a semiplanar YUV
 * framebuffer and store them in color_plane[1].
 *
 * Returns 0 on success, -EINVAL if the chroma source size exceeds the
 * plane limits.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	/*
	 * >> 17 instead of >> 16: the 16.16 fixed point luma coordinates
	 * halved for the subsampled chroma plane.
	 */
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3617
/*
 * Derive the CCS AUX surface offset/x/y from the main surface source
 * coordinates (subsampled by hsub x vsub) and store them in
 * color_plane[1]. Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	/* Main surface source origin in whole pixels (16.16 fixed point). */
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int x = src_x / hsub;
	int y = src_y / vsub;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	plane_state->color_plane[1].offset = offset;
	/* Keep the sub-sample remainder of the main surface coordinates. */
	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
	plane_state->color_plane[1].y = y * vsub + src_y % vsub;

	return 0;
}
3638
3639 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3640 {
3641         const struct drm_framebuffer *fb = plane_state->hw.fb;
3642         int ret;
3643
3644         ret = intel_plane_compute_gtt(plane_state);
3645         if (ret)
3646                 return ret;
3647
3648         if (!plane_state->uapi.visible)
3649                 return 0;
3650
3651         /*
3652          * Handle the AUX surface first since
3653          * the main surface setup depends on it.
3654          */
3655         if (drm_format_info_is_yuv_semiplanar(fb->format)) {
3656                 ret = skl_check_nv12_aux_surface(plane_state);
3657                 if (ret)
3658                         return ret;
3659         } else if (is_ccs_modifier(fb->modifier)) {
3660                 ret = skl_check_ccs_aux_surface(plane_state);
3661                 if (ret)
3662                         return ret;
3663         } else {
3664                 plane_state->color_plane[1].offset = ~0xfff;
3665                 plane_state->color_plane[1].x = 0;
3666                 plane_state->color_plane[1].y = 0;
3667         }
3668
3669         ret = skl_check_main_surface(plane_state);
3670         if (ret)
3671                 return ret;
3672
3673         return 0;
3674 }
3675
3676 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
3677                              const struct intel_plane_state *plane_state,
3678                              unsigned int *num, unsigned int *den)
3679 {
3680         const struct drm_framebuffer *fb = plane_state->hw.fb;
3681         unsigned int cpp = fb->format->cpp[0];
3682
3683         /*
3684          * g4x bspec says 64bpp pixel rate can't exceed 80%
3685          * of cdclk when the sprite plane is enabled on the
3686          * same pipe. ilk/snb bspec says 64bpp pixel rate is
3687          * never allowed to exceed 80% of cdclk. Let's just go
3688          * with the ilk/snb limit always.
3689          */
3690         if (cpp == 8) {
3691                 *num = 10;
3692                 *den = 8;
3693         } else {
3694                 *num = 1;
3695                 *den = 1;
3696         }
3697 }
3698
3699 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
3700                                 const struct intel_plane_state *plane_state)
3701 {
3702         unsigned int pixel_rate;
3703         unsigned int num, den;
3704
3705         /*
3706          * Note that crtc_state->pixel_rate accounts for both
3707          * horizontal and vertical panel fitter downscaling factors.
3708          * Pre-HSW bspec tells us to only consider the horizontal
3709          * downscaling factor here. We ignore that and just consider
3710          * both for simplicity.
3711          */
3712         pixel_rate = crtc_state->pixel_rate;
3713
3714         i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
3715
3716         /* two pixels per clock with double wide pipe */
3717         if (crtc_state->double_wide)
3718                 den *= 2;
3719
3720         return DIV_ROUND_UP(pixel_rate * num, den);
3721 }
3722
3723 unsigned int
3724 i9xx_plane_max_stride(struct intel_plane *plane,
3725                       u32 pixel_format, u64 modifier,
3726                       unsigned int rotation)
3727 {
3728         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3729
3730         if (!HAS_GMCH(dev_priv)) {
3731                 return 32*1024;
3732         } else if (INTEL_GEN(dev_priv) >= 4) {
3733                 if (modifier == I915_FORMAT_MOD_X_TILED)
3734                         return 16*1024;
3735                 else
3736                         return 32*1024;
3737         } else if (INTEL_GEN(dev_priv) >= 3) {
3738                 if (modifier == I915_FORMAT_MOD_X_TILED)
3739                         return 8*1024;
3740                 else
3741                         return 16*1024;
3742         } else {
3743                 if (plane->i9xx_plane == PLANE_C)
3744                         return 4*1024;
3745                 else
3746                         return 8*1024;
3747         }
3748 }
3749
3750 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3751 {
3752         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3753         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3754         u32 dspcntr = 0;
3755
3756         if (crtc_state->gamma_enable)
3757                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3758
3759         if (crtc_state->csc_enable)
3760                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3761
3762         if (INTEL_GEN(dev_priv) < 5)
3763                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3764
3765         return dspcntr;
3766 }
3767
/*
 * Build the plane-dependent DSPCNTR bits (pixel format, tiling,
 * rotation/mirroring) for a pre-SKL primary plane. The crtc-dependent
 * bits are computed separately by i9xx_plane_ctl_crtc().
 *
 * Returns 0 (with a MISSING_CASE warning) for an unsupported pixel
 * format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	/* Trickle feed is disabled on these platforms only. */
	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Map the DRM fourcc to the hardware pixel format field. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* Only gen4+ supports X tiling on the primary plane here. */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
3840
3841 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3842 {
3843         struct drm_i915_private *dev_priv =
3844                 to_i915(plane_state->uapi.plane->dev);
3845         const struct drm_framebuffer *fb = plane_state->hw.fb;
3846         int src_x, src_y, src_w;
3847         u32 offset;
3848         int ret;
3849
3850         ret = intel_plane_compute_gtt(plane_state);
3851         if (ret)
3852                 return ret;
3853
3854         if (!plane_state->uapi.visible)
3855                 return 0;
3856
3857         src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3858         src_x = plane_state->uapi.src.x1 >> 16;
3859         src_y = plane_state->uapi.src.y1 >> 16;
3860
3861         /* Undocumented hardware limit on i965/g4x/vlv/chv */
3862         if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
3863                 return -EINVAL;
3864
3865         intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3866
3867         if (INTEL_GEN(dev_priv) >= 4)
3868                 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3869                                                             plane_state, 0);
3870         else
3871                 offset = 0;
3872
3873         /*
3874          * Put the final coordinates back so that the src
3875          * coordinate checks will see the right values.
3876          */
3877         drm_rect_translate_to(&plane_state->uapi.src,
3878                               src_x << 16, src_y << 16);
3879
3880         /* HSW/BDW do this automagically in hardware */
3881         if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3882                 unsigned int rotation = plane_state->hw.rotation;
3883                 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3884                 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
3885
3886                 if (rotation & DRM_MODE_ROTATE_180) {
3887                         src_x += src_w - 1;
3888                         src_y += src_h - 1;
3889                 } else if (rotation & DRM_MODE_REFLECT_X) {
3890                         src_x += src_w - 1;
3891                 }
3892         }
3893
3894         plane_state->color_plane[0].offset = offset;
3895         plane_state->color_plane[0].x = src_x;
3896         plane_state->color_plane[0].y = src_y;
3897
3898         return 0;
3899 }
3900
3901 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
3902 {
3903         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3904         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3905
3906         if (IS_CHERRYVIEW(dev_priv))
3907                 return i9xx_plane == PLANE_B;
3908         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3909                 return false;
3910         else if (IS_GEN(dev_priv, 4))
3911                 return i9xx_plane == PLANE_C;
3912         else
3913                 return i9xx_plane == PLANE_B ||
3914                         i9xx_plane == PLANE_C;
3915 }
3916
3917 static int
3918 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3919                  struct intel_plane_state *plane_state)
3920 {
3921         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3922         int ret;
3923
3924         ret = chv_plane_check_rotation(plane_state);
3925         if (ret)
3926                 return ret;
3927
3928         ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
3929                                                   &crtc_state->uapi,
3930                                                   DRM_PLANE_HELPER_NO_SCALING,
3931                                                   DRM_PLANE_HELPER_NO_SCALING,
3932                                                   i9xx_plane_has_windowing(plane),
3933                                                   true);
3934         if (ret)
3935                 return ret;
3936
3937         ret = i9xx_check_plane_surface(plane_state);
3938         if (ret)
3939                 return ret;
3940
3941         if (!plane_state->uapi.visible)
3942                 return 0;
3943
3944         ret = intel_plane_check_src_coordinates(plane_state);
3945         if (ret)
3946                 return ret;
3947
3948         plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3949
3950         return 0;
3951 }
3952
/*
 * Program (and arm) a pre-SKL primary plane from precomputed state.
 *
 * All registers are written with the _FW accessors under the uncore
 * lock so the whole plane update goes out as one uninterrupted burst.
 * The surface address register write is what latches the update, so
 * it is written last.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine the precomputed plane bits with the pipe-level bits. */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 has no DSPSURF; the linear offset acts as the address. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4023
/*
 * Disable a pre-SKL primary plane. DSPCNTR is still programmed with
 * the pipe-level bits (see comment below), and the surface address
 * write of 0 is what arms the disable.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4054
/*
 * Read back whether a pre-SKL primary plane is currently enabled in
 * hardware, and which pipe it is attached to (via *pipe). Returns
 * false without touching *pipe if the power domain is off.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Gen5+ planes are fixed to their pipe; older ones select it. */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4089
4090 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4091 {
4092         struct drm_device *dev = intel_crtc->base.dev;
4093         struct drm_i915_private *dev_priv = to_i915(dev);
4094
4095         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4096         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4097         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4098 }
4099
4100 /*
4101  * This function detaches (aka. unbinds) unused scalers in hardware
4102  */
4103 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4104 {
4105         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4106         const struct intel_crtc_scaler_state *scaler_state =
4107                 &crtc_state->scaler_state;
4108         int i;
4109
4110         /* loop through and disable scalers that aren't in use */
4111         for (i = 0; i < intel_crtc->num_scalers; i++) {
4112                 if (!scaler_state->scalers[i].in_use)
4113                         skl_detach_scaler(intel_crtc, i);
4114         }
4115 }
4116
4117 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
4118                                           int color_plane, unsigned int rotation)
4119 {
4120         /*
4121          * The stride is either expressed as a multiple of 64 bytes chunks for
4122          * linear buffers or in number of tiles for tiled buffers.
4123          */
4124         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
4125                 return 64;
4126         else if (drm_rotation_90_or_270(rotation))
4127                 return intel_tile_height(fb, color_plane);
4128         else
4129                 return intel_tile_width_bytes(fb, color_plane);
4130 }
4131
4132 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4133                      int color_plane)
4134 {
4135         const struct drm_framebuffer *fb = plane_state->hw.fb;
4136         unsigned int rotation = plane_state->hw.rotation;
4137         u32 stride = plane_state->color_plane[color_plane].stride;
4138
4139         if (color_plane >= fb->format->num_planes)
4140                 return 0;
4141
4142         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4143 }
4144
/*
 * Map a DRM fourcc pixel format to the SKL+ PLANE_CTL format (and
 * RGB/YUV ordering) field. Returns 0 for unknown formats, which
 * should have been rejected earlier (hence MISSING_CASE).
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4204
4205 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4206 {
4207         if (!plane_state->hw.fb->format->has_alpha)
4208                 return PLANE_CTL_ALPHA_DISABLE;
4209
4210         switch (plane_state->hw.pixel_blend_mode) {
4211         case DRM_MODE_BLEND_PIXEL_NONE:
4212                 return PLANE_CTL_ALPHA_DISABLE;
4213         case DRM_MODE_BLEND_PREMULTI:
4214                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4215         case DRM_MODE_BLEND_COVERAGE:
4216                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4217         default:
4218                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4219                 return PLANE_CTL_ALPHA_DISABLE;
4220         }
4221 }
4222
4223 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4224 {
4225         if (!plane_state->hw.fb->format->has_alpha)
4226                 return PLANE_COLOR_ALPHA_DISABLE;
4227
4228         switch (plane_state->hw.pixel_blend_mode) {
4229         case DRM_MODE_BLEND_PIXEL_NONE:
4230                 return PLANE_COLOR_ALPHA_DISABLE;
4231         case DRM_MODE_BLEND_PREMULTI:
4232                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4233         case DRM_MODE_BLEND_COVERAGE:
4234                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4235         default:
4236                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4237                 return PLANE_COLOR_ALPHA_DISABLE;
4238         }
4239 }
4240
4241 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4242 {
4243         switch (fb_modifier) {
4244         case DRM_FORMAT_MOD_LINEAR:
4245                 break;
4246         case I915_FORMAT_MOD_X_TILED:
4247                 return PLANE_CTL_TILED_X;
4248         case I915_FORMAT_MOD_Y_TILED:
4249                 return PLANE_CTL_TILED_Y;
4250         case I915_FORMAT_MOD_Y_TILED_CCS:
4251                 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4252         case I915_FORMAT_MOD_Yf_TILED:
4253                 return PLANE_CTL_TILED_YF;
4254         case I915_FORMAT_MOD_Yf_TILED_CCS:
4255                 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4256         default:
4257                 MISSING_CASE(fb_modifier);
4258         }
4259
4260         return 0;
4261 }
4262
4263 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4264 {
4265         switch (rotate) {
4266         case DRM_MODE_ROTATE_0:
4267                 break;
4268         /*
4269          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4270          * while i915 HW rotation is clockwise, thats why this swapping.
4271          */
4272         case DRM_MODE_ROTATE_90:
4273                 return PLANE_CTL_ROTATE_270;
4274         case DRM_MODE_ROTATE_180:
4275                 return PLANE_CTL_ROTATE_180;
4276         case DRM_MODE_ROTATE_270:
4277                 return PLANE_CTL_ROTATE_90;
4278         default:
4279                 MISSING_CASE(rotate);
4280         }
4281
4282         return 0;
4283 }
4284
4285 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4286 {
4287         switch (reflect) {
4288         case 0:
4289                 break;
4290         case DRM_MODE_REFLECT_X:
4291                 return PLANE_CTL_FLIP_HORIZONTAL;
4292         case DRM_MODE_REFLECT_Y:
4293         default:
4294                 MISSING_CASE(reflect);
4295         }
4296
4297         return 0;
4298 }
4299
4300 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4301 {
4302         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4303         u32 plane_ctl = 0;
4304
4305         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4306                 return plane_ctl;
4307
4308         if (crtc_state->gamma_enable)
4309                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4310
4311         if (crtc_state->csc_enable)
4312                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4313
4314         return plane_ctl;
4315 }
4316
/*
 * Compute the per-plane PLANE_CTL value for a SKL+ plane: enable bit,
 * format, tiling, rotation/reflection, colorkey mode, and (pre-GLK
 * only) alpha/gamma/YCbCr bits. Pipe-level bits come separately from
 * skl_plane_ctl_crtc().
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* On GLK/gen10+ these bits moved to PLANE_COLOR_CTL. */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip is a gen10+ (CNL) feature. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4355
4356 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4357 {
4358         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4359         u32 plane_color_ctl = 0;
4360
4361         if (INTEL_GEN(dev_priv) >= 11)
4362                 return plane_color_ctl;
4363
4364         if (crtc_state->gamma_enable)
4365                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4366
4367         if (crtc_state->csc_enable)
4368                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4369
4370         return plane_color_ctl;
4371 }
4372
/*
 * Compute the per-plane PLANE_COLOR_CTL value for a GLK+ plane:
 * alpha blending mode and the YCbCr->RGB conversion setup.
 * Pipe-level bits come separately from glk_plane_color_ctl_crtc().
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	/* Non-HDR planes use the fixed-function YUV->RGB CSC modes. */
	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR planes use the programmable input CSC instead. */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4399
/*
 * Restore display hardware state after a reset/resume: re-take over
 * the hardware state, then (if @state is non-NULL) commit the
 * previously duplicated atomic state with every crtc forced through
 * a full mode recalculation. Returns 0 or the commit error; -EDEADLK
 * would indicate a caller locking bug, hence the WARN_ON.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
4438
4439 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4440 {
4441         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4442                 intel_has_gpu_reset(&dev_priv->gt));
4443 }
4444
/*
 * Prepare the display for a GPU reset that clobbers it: break any
 * modeset-vs-reset deadlock, take all modeset locks, and gracefully
 * disable all crtcs, stashing the duplicated atomic state for
 * intel_finish_reset() to restore. The mutex/ctx taken here stay
 * held across the reset and are dropped in intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	/* A stuck fb pin means a modeset is blocked on the GPU; wedge it. */
	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4501
/*
 * Counterpart of intel_prepare_reset(): restore the display state
 * stashed before the GPU reset (re-initializing the display hardware
 * first if the reset clobbered it), then drop the modeset locks and
 * the I915_RESET_MODESET bit taken in intel_prepare_reset().
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts before restoring state. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4552
4553 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4554 {
4555         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4556         enum pipe pipe = crtc->pipe;
4557         u32 tmp;
4558
4559         tmp = I915_READ(PIPE_CHICKEN(pipe));
4560
4561         /*
4562          * Display WA #1153: icl
4563          * enable hardware to bypass the alpha math
4564          * and rounding for per-pixel values 00 and 0xff
4565          */
4566         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4567         /*
4568          * Display WA # 1605353570: icl
4569          * Set the pixel rounding bit to 1 for allowing
4570          * passthrough of Frame buffer pixels unmodified
4571          * across pipe
4572          */
4573         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4574         I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4575 }
4576
/*
 * Enable Transcoder Port Sync on a slave crtc's transcoder, pointing
 * it at its master transcoder. No-op for non-slave crtcs.
 */
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 trans_ddi_func_ctl2_val;
	u8 master_select;

	/*
	 * Configure the master select and enable Transcoder Port Sync for
	 * Slave CRTCs transcoder.
	 */
	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
		return;

	/* Master select encoding: 0 = EDP, otherwise transcoder + 1. */
	if (crtc_state->master_transcoder == TRANSCODER_EDP)
		master_select = 0;
	else
		master_select = crtc_state->master_transcoder + 1;

	/* Set the master select bits for Transcoder Port Sync */
	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
	/* Enable Transcoder Port Sync */
	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;

	I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
		   trans_ddi_func_ctl2_val);
}
4606
4607 static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
4608 {
4609         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4610         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4611         i915_reg_t reg;
4612         u32 trans_ddi_func_ctl2_val;
4613
4614         if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
4615                 return;
4616
4617         DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
4618                       transcoder_name(old_crtc_state->cpu_transcoder));
4619
4620         reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
4621         trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
4622                                     PORT_SYNC_MODE_MASTER_SELECT_MASK);
4623         I915_WRITE(reg, trans_ddi_func_ctl2_val);
4624 }
4625
/*
 * Switch the FDI link of @crtc's pipe from a training pattern to
 * normal pixel-data operation on both the TX (CPU) and RX (PCH)
 * sides, then wait one idle pattern time for the link to settle.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4666
/* The FDI link training functions for ILK/Ibexpeak.
 *
 * Two-stage training: pattern 1 until the RX reports bit lock
 * (clock recovery), then pattern 2 until symbol lock (channel
 * equalization). Lock status is polled in the sticky FDI_RX_IIR
 * bits, which are cleared by writing them back.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; write the sticky bit back to clear it */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock, again clearing the sticky bit on success */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4760
/*
 * FDI voltage-swing / pre-emphasis levels tried in order during
 * SNB-B and IVB link training, weakest drive strength first.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4767
/* The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-stage (bit lock, then symbol lock) sequence as ILK, but
 * each stage additionally steps through the snb_b_fdi_train_param[]
 * vswing/pre-emphasis levels until the RX reports lock.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT has a dedicated RX-side pattern field */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the drive levels until bit lock (clock recovery) shows up */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write back the sticky bit to clear it */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on lock */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same drive-level walk, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4900
/* Manual link training for Ivy Bridge A0 parts.
 *
 * Unlike ILK/SNB, on failure of either training stage the whole
 * link is torn down and restarted at the next vswing/pre-emphasis
 * level; each level is tried twice (j counts attempts, j/2 indexes
 * the level table).
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* two attempts per drive level, hence j/2 */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock (clock recovery) */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* double-read: the sticky bit may set between reads */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock (channel equalization) */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
5020
/*
 * Power up the FDI PLLs: the PCH-side RX PLL first (then switch the
 * RX clock source from Rawclk to PCDclk), and finally the CPU-side
 * TX PLL if it is not already running.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* mirror the pipe's BPC setting into the FDI RX bpc field */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
5057
/*
 * Power down the FDI PLLs in the reverse order of
 * ironlake_fdi_pll_enable(): RX back to Rawclk, TX PLL off,
 * then RX PLL off.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
5087
/*
 * Disable the CPU FDI TX and PCH FDI RX, then leave both sides
 * parked in training pattern 1 ready for the next link training.
 */
static void ironlake_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* keep the FDI RX bpc field in sync with the pipe's PIPECONF */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
5138
5139 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5140 {
5141         struct drm_crtc *crtc;
5142         bool cleanup_done;
5143
5144         drm_for_each_crtc(crtc, &dev_priv->drm) {
5145                 struct drm_crtc_commit *commit;
5146                 spin_lock(&crtc->commit_lock);
5147                 commit = list_first_entry_or_null(&crtc->commit_list,
5148                                                   struct drm_crtc_commit, commit_entry);
5149                 cleanup_done = commit ?
5150                         try_wait_for_completion(&commit->cleanup_done) : true;
5151                 spin_unlock(&crtc->commit_lock);
5152
5153                 if (cleanup_done)
5154                         continue;
5155
5156                 drm_crtc_wait_one_vblank(crtc);
5157
5158                 return true;
5159         }
5160
5161         return false;
5162 }
5163
/* Gate the pixel clock and stop the iCLKIP spread-spectrum modulator. */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* sideband (SBI) accesses are serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5178
/* Program iCLKIP clock to the desired frequency.
 *
 * Decomposes the target pixel clock into an integer divider (divsel),
 * a phase increment (phaseinc) and an auxiliary /2 divider (auxdiv)
 * against the 172.8 MHz virtual root clock, then programs those via
 * the sideband interface and re-enables the modulator.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* modulator must be stopped before it is reprogrammed */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock in in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* sideband (SBI) accesses are serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5257
/*
 * Read back the currently programmed iCLKIP frequency in KHz by
 * inverting the divisor decomposition done in lpt_program_iclkip().
 * Returns 0 if the pixel clock is gated or the modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	/* sideband (SBI) accesses are serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* reconstruct the divisor that lpt_program_iclkip() split up */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5294
/*
 * Copy the CPU transcoder's horizontal/vertical timing registers to
 * the PCH transcoder so both ends of the FDI link use the same mode.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
						enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
5318
/*
 * Set or clear the FDI B/C lane bifurcation chicken bit in
 * SOUTH_CHICKEN1. The bit may only be flipped while both FDI RX B
 * and FDI RX C are disabled (hence the WARN_ONs).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* nothing to do if the bit already matches the requested state */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
5338
5339 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5340 {
5341         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5342         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5343
5344         switch (crtc->pipe) {
5345         case PIPE_A:
5346                 break;
5347         case PIPE_B:
5348                 if (crtc_state->fdi_lanes > 2)
5349                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5350                 else
5351                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5352
5353                 break;
5354         case PIPE_C:
5355                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5356
5357                 break;
5358         default:
5359                 BUG();
5360         }
5361 }
5362
5363 /*
5364  * Finds the encoder associated with the given CRTC. This can only be
5365  * used when we know that the CRTC isn't feeding multiple encoders!
5366  */
5367 static struct intel_encoder *
5368 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5369                            const struct intel_crtc_state *crtc_state)
5370 {
5371         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5372         const struct drm_connector_state *connector_state;
5373         const struct drm_connector *connector;
5374         struct intel_encoder *encoder = NULL;
5375         int num_encoders = 0;
5376         int i;
5377
5378         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5379                 if (connector_state->crtc != &crtc->base)
5380                         continue;
5381
5382                 encoder = to_intel_encoder(connector_state->best_encoder);
5383                 num_encoders++;
5384         }
5385
5386         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5387              num_encoders, pipe_name(crtc->pipe));
5388
5389         return encoder;
5390 }
5391
5392 /*
5393  * Enable PCH resources required for PCH ports:
5394  *   - PCH PLLs
5395  *   - FDI training & RX/TX
5396  *   - update transcoder timings
5397  *   - DP transcoding bits
5398  *   - transcoder
5399  */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
                                const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 temp;

        assert_pch_transcoder_disabled(dev_priv, pipe);

        /* IVB may need the FDI B/C lane split updated before training. */
        if (IS_IVYBRIDGE(dev_priv))
                ivybridge_update_fdi_bc_bifurcation(crtc_state);

        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
        I915_WRITE(FDI_RX_TUSIZE1(pipe),
                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc, crtc_state);

        /* We need to program the right clock selection before writing the pixel
         * multiplier into the DPLL. */
        if (HAS_PCH_CPT(dev_priv)) {
                u32 sel;

                /* Route PCH DPLL A or B to this transcoder depending on
                 * which shared DPLL the atomic state assigned to us. */
                temp = I915_READ(PCH_DPLL_SEL);
                temp |= TRANS_DPLL_ENABLE(pipe);
                sel = TRANS_DPLLB_SEL(pipe);
                if (crtc_state->shared_dpll ==
                    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
                        temp |= sel;
                else
                        temp &= ~sel;
                I915_WRITE(PCH_DPLL_SEL, temp);
        }

        /* XXX: pch pll's can be enabled any time before we enable the PCH
         * transcoder, and we actually should do this to not upset any PCH
         * transcoder that already use the clock when we share it.
         *
         * Note that enable_shared_dpll tries to do the right thing, but
         * get_shared_dpll unconditionally resets the pll - we need that to have
         * the right LVDS enable sequence. */
        intel_enable_shared_dpll(crtc_state);

        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
        ironlake_pch_transcoder_set_timings(crtc_state, pipe);

        intel_fdi_normal_train(crtc);

        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev_priv) &&
            intel_crtc_has_dp_encoder(crtc_state)) {
                const struct drm_display_mode *adjusted_mode =
                        &crtc_state->hw.adjusted_mode;
                /* Reuse the BPC already programmed into PIPECONF. */
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                i915_reg_t reg = TRANS_DP_CTL(pipe);
                enum port port;

                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
                          TRANS_DP_SYNC_MASK |
                          TRANS_DP_BPC_MASK);
                temp |= TRANS_DP_OUTPUT_ENABLE;
                temp |= bpc << 9; /* same format but at 11:9 */

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

                /* Only ports B..D can feed a CPT PCH DP transcoder. */
                port = intel_get_crtc_new_encoder(state, crtc_state)->port;
                WARN_ON(port < PORT_B || port > PORT_D);
                temp |= TRANS_DP_PORT_SEL(port);

                I915_WRITE(reg, temp);
        }

        ironlake_enable_pch_transcoder(crtc_state);
}
5483
/* Enable the LPT PCH transcoder for the given crtc state. */
static void lpt_pch_enable(const struct intel_atomic_state *state,
                           const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* The sole LPT PCH transcoder is tracked as PIPE_A here. */
        assert_pch_transcoder_disabled(dev_priv, PIPE_A);

        /* Program the iCLKIP clock before enabling the transcoder. */
        lpt_program_iclkip(crtc_state);

        /* Set transcoder timing. */
        ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5500
5501 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
5502                                enum pipe pipe)
5503 {
5504         i915_reg_t dslreg = PIPEDSL(pipe);
5505         u32 temp;
5506
5507         temp = I915_READ(dslreg);
5508         udelay(500);
5509         if (wait_for(I915_READ(dslreg) != temp, 5)) {
5510                 if (wait_for(I915_READ(dslreg) != temp, 5))
5511                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5512         }
5513 }
5514
5515 /*
5516  * The hardware phase 0.0 refers to the center of the pixel.
5517  * We want to start from the top/left edge which is phase
5518  * -0.5. That matches how the hardware calculates the scaling
5519  * factors (from top-left of the first pixel to bottom-right
5520  * of the last pixel, as opposed to the pixel centers).
5521  *
5522  * For 4:2:0 subsampled chroma planes we obviously have to
5523  * adjust that so that the chroma sample position lands in
5524  * the right spot.
5525  *
5526  * Note that for packed YCbCr 4:2:2 formats there is no way to
5527  * control chroma siting. The hardware simply replicates the
5528  * chroma samples for both of the luma samples, and thus we don't
5529  * actually get the expected MPEG2 chroma siting convention :(
5530  * The same behaviour is observed on pre-SKL platforms as well.
5531  *
5532  * Theory behind the formula (note that we ignore sub-pixel
5533  * source coordinates):
5534  * s = source sample position
5535  * d = destination sample position
5536  *
5537  * Downscaling 4:1:
5538  * -0.5
5539  * | 0.0
5540  * | |     1.5 (initial phase)
5541  * | |     |
5542  * v v     v
5543  * | s | s | s | s |
5544  * |       d       |
5545  *
5546  * Upscaling 1:4:
5547  * -0.5
5548  * | -0.375 (initial phase)
5549  * | |     0.0
5550  * | |     |
5551  * v v     v
5552  * |       s       |
5553  * | d | d | d | d |
5554  */
5555 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5556 {
5557         int phase = -0x8000;
5558         u16 trip = 0;
5559
5560         if (chroma_cosited)
5561                 phase += (sub - 1) * 0x8000 / sub;
5562
5563         phase += scale / (2 * sub);
5564
5565         /*
5566          * Hardware initial phase limited to [-0.5:1.5].
5567          * Since the max hardware scale factor is 3.0, we
5568          * should never actually excdeed 1.0 here.
5569          */
5570         WARN_ON(phase < -0x8000 || phase > 0x18000);
5571
5572         if (phase < 0)
5573                 phase = 0x10000 + phase;
5574         else
5575                 trip = PS_PHASE_TRIP;
5576
5577         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5578 }
5579
5580 #define SKL_MIN_SRC_W 8
5581 #define SKL_MAX_SRC_W 4096
5582 #define SKL_MIN_SRC_H 8
5583 #define SKL_MAX_SRC_H 4096
5584 #define SKL_MIN_DST_W 8
5585 #define SKL_MAX_DST_W 4096
5586 #define SKL_MIN_DST_H 8
5587 #define SKL_MAX_DST_H 4096
5588 #define ICL_MAX_SRC_W 5120
5589 #define ICL_MAX_SRC_H 4096
5590 #define ICL_MAX_DST_W 5120
5591 #define ICL_MAX_DST_H 4096
5592 #define SKL_MIN_YUV_420_SRC_W 16
5593 #define SKL_MIN_YUV_420_SRC_H 16
5594
/*
 * Stage (or free) a scaler assignment for one user (the pipe itself or a
 * plane) in @crtc_state. On detach the user's bit is cleared from
 * scaler_users and its scaler marked free; otherwise the request is
 * range-checked and the user's bit is staged. Actual register programming
 * happens later during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL if the request cannot be supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  const struct drm_format_info *format, bool need_scaler)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        if (src_w != dst_w || src_h != dst_h)
                need_scaler = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
            need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler binded to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaler) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* Planar YUV sources have a larger minimum size (16x16). */
        if (format && drm_format_info_is_yuv_semiplanar(format) &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks: ICL+ allows larger (5120 wide) src/dst sizes */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (INTEL_GEN(dev_priv) >= 11 &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (INTEL_GEN(dev_priv) < 11 &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                scaler_state->scaler_users);

        return 0;
}
5683
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc state
 *
 * Stages a pipe-scaler request (or a detach when the crtc is inactive)
 * for the whole pipe in the crtc's scaler state.
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
        const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
        bool need_scaler = false;

        /* YCbCr 4:2:0 output always requires a pipe scaler. */
        if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
                need_scaler = true;

        return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
                                 &state->scaler_state.scaler_id,
                                 state->pipe_src_w, state->pipe_src_h,
                                 adjusted_mode->crtc_hdisplay,
                                 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
}
5707
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
                                   struct intel_plane_state *plane_state)
{
        struct intel_plane *intel_plane =
                to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
        struct drm_framebuffer *fb = plane_state->hw.fb;
        int ret;
        /* Detach the scaler when the plane has no fb or isn't visible. */
        bool force_detach = !fb || !plane_state->uapi.visible;
        bool need_scaler = false;

        /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
        if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
            fb && drm_format_info_is_yuv_semiplanar(fb->format))
                need_scaler = true;

        /* Src coordinates are 16.16 fixed point; >> 16 yields pixels. */
        ret = skl_update_scaler(crtc_state, force_detach,
                                drm_plane_index(&intel_plane->base),
                                &plane_state->scaler_id,
                                drm_rect_width(&plane_state->uapi.src) >> 16,
                                drm_rect_height(&plane_state->uapi.src) >> 16,
                                drm_rect_width(&plane_state->uapi.dst),
                                drm_rect_height(&plane_state->uapi.dst),
                                fb ? fb->format : NULL, need_scaler);

        /* Done if the update failed or no scaler ended up assigned. */
        if (ret || plane_state->scaler_id < 0)
                return ret;

        /* check colorkey */
        if (plane_state->ckey.flags) {
                DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
                              intel_plane->base.base.id,
                              intel_plane->base.name);
                return -EINVAL;
        }

        /* Check src format */
        switch (fb->format->format) {
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ARGB2101010:
        case DRM_FORMAT_ABGR2101010:
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_VYUY:
        case DRM_FORMAT_NV12:
        case DRM_FORMAT_P010:
        case DRM_FORMAT_P012:
        case DRM_FORMAT_P016:
        case DRM_FORMAT_Y210:
        case DRM_FORMAT_Y212:
        case DRM_FORMAT_Y216:
        case DRM_FORMAT_XVYU2101010:
        case DRM_FORMAT_XVYU12_16161616:
        case DRM_FORMAT_XVYU16161616:
                break;
        case DRM_FORMAT_XBGR16161616F:
        case DRM_FORMAT_ABGR16161616F:
        case DRM_FORMAT_XRGB16161616F:
        case DRM_FORMAT_ARGB16161616F:
                /* FP16 formats are only scalable on gen11+. */
                if (INTEL_GEN(dev_priv) >= 11)
                        break;
                /* fall through */
        default:
                DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
                              intel_plane->base.base.id, intel_plane->base.name,
                              fb->base.id, fb->format->format);
                return -EINVAL;
        }

        return 0;
}
5795
5796 static void skylake_scaler_disable(struct intel_crtc *crtc)
5797 {
5798         int i;
5799
5800         for (i = 0; i < crtc->num_scalers; i++)
5801                 skl_detach_scaler(crtc, i);
5802 }
5803
/* Program the pipe scaler that acts as the panel fitter on SKL+. */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;

        if (crtc_state->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int pfit_w, pfit_h, hscale, vscale;
                int id;

                /* Panel fitting requires a scaler staged by atomic check. */
                if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
                        return;

                /* pch_pfit.size packs width in the high 16 bits and
                 * height in the low 16 bits. */
                pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
                pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

                /* .16 fixed-point scale factors from source to fitted size. */
                hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
                vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

                uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
        }
}
5840
5841 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5842 {
5843         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5844         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5845         enum pipe pipe = crtc->pipe;
5846
5847         if (crtc_state->pch_pfit.enabled) {
5848                 /* Force use of hard-coded filter coefficients
5849                  * as some pre-programmed values are broken,
5850                  * e.g. x201.
5851                  */
5852                 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5853                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5854                                                  PF_PIPE_SEL_IVB(pipe));
5855                 else
5856                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5857                 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5858                 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5859         }
5860 }
5861
/* Enable IPS if the crtc state requests it (HSW/BDW). */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                /* BDW enables IPS via the pcode mailbox, not IPS_CTL. */
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
5897
/* Disable IPS and wait a vblank so planes may safely be disabled after. */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                /* BDW disables IPS via the pcode mailbox, not IPS_CTL. */
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5924
5925 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5926 {
5927         if (intel_crtc->overlay)
5928                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5929
5930         /* Let userspace switch the overlay on again. In most cases userspace
5931          * has to recompute where to put it anyway.
5932          */
5933 }
5934
5935 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5936                                        const struct intel_crtc_state *new_crtc_state)
5937 {
5938         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5939         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5940
5941         if (!old_crtc_state->ips_enabled)
5942                 return false;
5943
5944         if (needs_modeset(new_crtc_state))
5945                 return true;
5946
5947         /*
5948          * Workaround : Do not read or write the pipe palette/gamma data while
5949          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5950          *
5951          * Disable IPS before we program the LUT.
5952          */
5953         if (IS_HASWELL(dev_priv) &&
5954             (new_crtc_state->uapi.color_mgmt_changed ||
5955              new_crtc_state->update_pipe) &&
5956             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5957                 return true;
5958
5959         return !new_crtc_state->ips_enabled;
5960 }
5961
5962 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5963                                        const struct intel_crtc_state *new_crtc_state)
5964 {
5965         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5966         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5967
5968         if (!new_crtc_state->ips_enabled)
5969                 return false;
5970
5971         if (needs_modeset(new_crtc_state))
5972                 return true;
5973
5974         /*
5975          * Workaround : Do not read or write the pipe palette/gamma data while
5976          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5977          *
5978          * Re-enable IPS after the LUT has been programmed.
5979          */
5980         if (IS_HASWELL(dev_priv) &&
5981             (new_crtc_state->uapi.color_mgmt_changed ||
5982              new_crtc_state->update_pipe) &&
5983             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5984                 return true;
5985
5986         /*
5987          * We can't read out IPS on broadwell, assume the worst and
5988          * forcibly enable IPS on the first fastset.
5989          */
5990         if (new_crtc_state->update_pipe &&
5991             old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5992                 return true;
5993
5994         return !old_crtc_state->ips_enabled;
5995 }
5996
5997 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
5998 {
5999         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6000
6001         if (!crtc_state->nv12_planes)
6002                 return false;
6003
6004         /* WA Display #0827: Gen9:all */
6005         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6006                 return true;
6007
6008         return false;
6009 }
6010
6011 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6012 {
6013         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6014
6015         /* Wa_2006604312:icl */
6016         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
6017                 return true;
6018
6019         return false;
6020 }
6021
6022 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6023                             const struct intel_crtc_state *new_crtc_state)
6024 {
6025         return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6026                 new_crtc_state->active_planes;
6027 }
6028
6029 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6030                              const struct intel_crtc_state *new_crtc_state)
6031 {
6032         return old_crtc_state->active_planes &&
6033                 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
6034 }
6035
/*
 * Per-crtc fixups after the planes have been committed: frontbuffer flip
 * notification, optional watermark update, re-enabling IPS/FBC, and
 * tearing down display workarounds that are no longer needed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
                                    struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_plane *primary = to_intel_plane(crtc->base.primary);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        const struct intel_plane_state *new_primary_state =
                intel_atomic_get_new_plane_state(state, primary);
        enum pipe pipe = crtc->pipe;

        intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

        if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
                intel_update_watermarks(crtc);

        if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
                hsw_enable_ips(new_crtc_state);

        /* FBC post-update only when the primary plane is in this update. */
        if (new_primary_state)
                intel_fbc_post_update(crtc);

        /* Drop WA #0827 once no NV12 planes remain. */
        if (needs_nv12_wa(old_crtc_state) &&
            !needs_nv12_wa(new_crtc_state))
                skl_wa_827(dev_priv, pipe, false);

        /* Drop Wa_2006604312:icl once no scalers remain in use. */
        if (needs_scalerclk_wa(old_crtc_state) &&
            !needs_scalerclk_wa(new_crtc_state))
                icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6068
/*
 * Per-crtc preparation before the planes are committed: disable IPS,
 * prepare FBC, arm display workarounds, disable self-refresh/LP
 * watermarks (with the vblank waits the hardware requires), and program
 * the intermediate watermarks for non-modeset updates.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_plane *primary = to_intel_plane(crtc->base.primary);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        const struct intel_plane_state *new_primary_state =
                intel_atomic_get_new_plane_state(state, primary);
        enum pipe pipe = crtc->pipe;

        if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
                hsw_disable_ips(old_crtc_state);

        /* FBC pre-update only when the primary plane is in this update. */
        if (new_primary_state)
                intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state);

        /* Display WA 827 */
        if (!needs_nv12_wa(old_crtc_state) &&
            needs_nv12_wa(new_crtc_state))
                skl_wa_827(dev_priv, pipe, true);

        /* Wa_2006604312:icl */
        if (!needs_scalerclk_wa(old_crtc_state) &&
            needs_scalerclk_wa(new_crtc_state))
                icl_wa_scalerclkgating(dev_priv, pipe, true);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
            new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (old_crtc_state->hw.active &&
            new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
                intel_wait_for_vblank(dev_priv, pipe);

        /*
         * If we're doing a modeset we don't need to do any
         * pre-vblank watermark programming here.
         */
        if (!needs_modeset(new_crtc_state)) {
                /*
                 * For platforms that support atomic watermarks, program the
                 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
                 * will be the intermediate values that are safe for both pre- and
                 * post- vblank; when vblank happens, the 'active' values will be set
                 * to the final 'target' values and we'll do this again to get the
                 * optimal watermarks.  For gen9+ platforms, the values we program here
                 * will be the final target values which will get automatically latched
                 * at vblank time; no further programming will be necessary.
                 *
                 * If a platform hasn't been transitioned to atomic watermarks yet,
                 * we'll continue to update watermarks the old way, if flags tell
                 * us to.
                 */
                if (dev_priv->display.initial_watermarks)
                        dev_priv->display.initial_watermarks(state, crtc);
                else if (new_crtc_state->update_wm_pre)
                        intel_update_watermarks(crtc);
        }

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         *
         * We do this after .initial_watermarks() so that we have a
         * chance of catching underruns with the intermediate watermarks
         * vs. the old plane configuration.
         */
        if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6158
6159 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6160                                       struct intel_crtc *crtc)
6161 {
6162         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6163         const struct intel_crtc_state *new_crtc_state =
6164                 intel_atomic_get_new_crtc_state(state, crtc);
6165         unsigned int update_mask = new_crtc_state->update_planes;
6166         const struct intel_plane_state *old_plane_state;
6167         struct intel_plane *plane;
6168         unsigned fb_bits = 0;
6169         int i;
6170
6171         intel_crtc_dpms_overlay_disable(crtc);
6172
6173         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6174                 if (crtc->pipe != plane->pipe ||
6175                     !(update_mask & BIT(plane->id)))
6176                         continue;
6177
6178                 intel_disable_plane(plane, new_crtc_state);
6179
6180                 if (old_plane_state->uapi.visible)
6181                         fb_bits |= plane->frontbuffer_bit;
6182         }
6183
6184         intel_frontbuffer_flip(dev_priv, fb_bits);
6185 }
6186
6187 /*
6188  * intel_connector_primary_encoder - get the primary encoder for a connector
6189  * @connector: connector for which to return the encoder
6190  *
6191  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6192  * all connectors to their encoder, except for DP-MST connectors which have
6193  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6194  * pointed to by as many DP-MST connectors as there are pipes.
6195  */
6196 static struct intel_encoder *
6197 intel_connector_primary_encoder(struct intel_connector *connector)
6198 {
6199         struct intel_encoder *encoder;
6200
6201         if (connector->mst_port)
6202                 return &dp_to_dig_port(connector->mst_port)->base;
6203
6204         encoder = intel_attached_encoder(&connector->base);
6205         WARN_ON(!encoder);
6206
6207         return encoder;
6208 }
6209
6210 static bool
6211 intel_connector_needs_modeset(struct intel_atomic_state *state,
6212                               const struct drm_connector_state *old_conn_state,
6213                               const struct drm_connector_state *new_conn_state)
6214 {
6215         struct intel_crtc *old_crtc = old_conn_state->crtc ?
6216                                       to_intel_crtc(old_conn_state->crtc) : NULL;
6217         struct intel_crtc *new_crtc = new_conn_state->crtc ?
6218                                       to_intel_crtc(new_conn_state->crtc) : NULL;
6219
6220         return new_crtc != old_crtc ||
6221                (new_crtc &&
6222                 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
6223 }
6224
6225 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6226 {
6227         struct drm_connector_state *old_conn_state;
6228         struct drm_connector_state *new_conn_state;
6229         struct drm_connector *conn;
6230         int i;
6231
6232         for_each_oldnew_connector_in_state(&state->base, conn,
6233                                            old_conn_state, new_conn_state, i) {
6234                 struct intel_encoder *encoder;
6235                 struct intel_crtc *crtc;
6236
6237                 if (!intel_connector_needs_modeset(state,
6238                                                    old_conn_state,
6239                                                    new_conn_state))
6240                         continue;
6241
6242                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6243                 if (!encoder->update_prepare)
6244                         continue;
6245
6246                 crtc = new_conn_state->crtc ?
6247                         to_intel_crtc(new_conn_state->crtc) : NULL;
6248                 encoder->update_prepare(state, encoder, crtc);
6249         }
6250 }
6251
6252 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6253 {
6254         struct drm_connector_state *old_conn_state;
6255         struct drm_connector_state *new_conn_state;
6256         struct drm_connector *conn;
6257         int i;
6258
6259         for_each_oldnew_connector_in_state(&state->base, conn,
6260                                            old_conn_state, new_conn_state, i) {
6261                 struct intel_encoder *encoder;
6262                 struct intel_crtc *crtc;
6263
6264                 if (!intel_connector_needs_modeset(state,
6265                                                    old_conn_state,
6266                                                    new_conn_state))
6267                         continue;
6268
6269                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6270                 if (!encoder->update_complete)
6271                         continue;
6272
6273                 crtc = new_conn_state->crtc ?
6274                         to_intel_crtc(new_conn_state->crtc) : NULL;
6275                 encoder->update_complete(state, encoder, crtc);
6276         }
6277 }
6278
6279 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6280                                           struct intel_crtc *crtc)
6281 {
6282         const struct intel_crtc_state *crtc_state =
6283                 intel_atomic_get_new_crtc_state(state, crtc);
6284         const struct drm_connector_state *conn_state;
6285         struct drm_connector *conn;
6286         int i;
6287
6288         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6289                 struct intel_encoder *encoder =
6290                         to_intel_encoder(conn_state->best_encoder);
6291
6292                 if (conn_state->crtc != &crtc->base)
6293                         continue;
6294
6295                 if (encoder->pre_pll_enable)
6296                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6297         }
6298 }
6299
6300 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6301                                       struct intel_crtc *crtc)
6302 {
6303         const struct intel_crtc_state *crtc_state =
6304                 intel_atomic_get_new_crtc_state(state, crtc);
6305         const struct drm_connector_state *conn_state;
6306         struct drm_connector *conn;
6307         int i;
6308
6309         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6310                 struct intel_encoder *encoder =
6311                         to_intel_encoder(conn_state->best_encoder);
6312
6313                 if (conn_state->crtc != &crtc->base)
6314                         continue;
6315
6316                 if (encoder->pre_enable)
6317                         encoder->pre_enable(encoder, crtc_state, conn_state);
6318         }
6319 }
6320
6321 static void intel_encoders_enable(struct intel_atomic_state *state,
6322                                   struct intel_crtc *crtc)
6323 {
6324         const struct intel_crtc_state *crtc_state =
6325                 intel_atomic_get_new_crtc_state(state, crtc);
6326         const struct drm_connector_state *conn_state;
6327         struct drm_connector *conn;
6328         int i;
6329
6330         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6331                 struct intel_encoder *encoder =
6332                         to_intel_encoder(conn_state->best_encoder);
6333
6334                 if (conn_state->crtc != &crtc->base)
6335                         continue;
6336
6337                 if (encoder->enable)
6338                         encoder->enable(encoder, crtc_state, conn_state);
6339                 intel_opregion_notify_encoder(encoder, true);
6340         }
6341 }
6342
6343 static void intel_encoders_disable(struct intel_atomic_state *state,
6344                                    struct intel_crtc *crtc)
6345 {
6346         const struct intel_crtc_state *old_crtc_state =
6347                 intel_atomic_get_old_crtc_state(state, crtc);
6348         const struct drm_connector_state *old_conn_state;
6349         struct drm_connector *conn;
6350         int i;
6351
6352         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6353                 struct intel_encoder *encoder =
6354                         to_intel_encoder(old_conn_state->best_encoder);
6355
6356                 if (old_conn_state->crtc != &crtc->base)
6357                         continue;
6358
6359                 intel_opregion_notify_encoder(encoder, false);
6360                 if (encoder->disable)
6361                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6362         }
6363 }
6364
6365 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6366                                         struct intel_crtc *crtc)
6367 {
6368         const struct intel_crtc_state *old_crtc_state =
6369                 intel_atomic_get_old_crtc_state(state, crtc);
6370         const struct drm_connector_state *old_conn_state;
6371         struct drm_connector *conn;
6372         int i;
6373
6374         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6375                 struct intel_encoder *encoder =
6376                         to_intel_encoder(old_conn_state->best_encoder);
6377
6378                 if (old_conn_state->crtc != &crtc->base)
6379                         continue;
6380
6381                 if (encoder->post_disable)
6382                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6383         }
6384 }
6385
6386 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6387                                             struct intel_crtc *crtc)
6388 {
6389         const struct intel_crtc_state *old_crtc_state =
6390                 intel_atomic_get_old_crtc_state(state, crtc);
6391         const struct drm_connector_state *old_conn_state;
6392         struct drm_connector *conn;
6393         int i;
6394
6395         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6396                 struct intel_encoder *encoder =
6397                         to_intel_encoder(old_conn_state->best_encoder);
6398
6399                 if (old_conn_state->crtc != &crtc->base)
6400                         continue;
6401
6402                 if (encoder->post_pll_disable)
6403                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6404         }
6405 }
6406
6407 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6408                                        struct intel_crtc *crtc)
6409 {
6410         const struct intel_crtc_state *crtc_state =
6411                 intel_atomic_get_new_crtc_state(state, crtc);
6412         const struct drm_connector_state *conn_state;
6413         struct drm_connector *conn;
6414         int i;
6415
6416         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6417                 struct intel_encoder *encoder =
6418                         to_intel_encoder(conn_state->best_encoder);
6419
6420                 if (conn_state->crtc != &crtc->base)
6421                         continue;
6422
6423                 if (encoder->update_pipe)
6424                         encoder->update_pipe(encoder, crtc_state, conn_state);
6425         }
6426 }
6427
6428 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6429 {
6430         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6431         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6432
6433         plane->disable_plane(plane, crtc_state);
6434 }
6435
/*
 * Full crtc enable sequence for ILK-style PCH platforms: program pipe
 * timings/src size/M-N values, bring up FDI and the PCH where needed,
 * and only re-enable FIFO underrun reporting once the pipe is running.
 * The ordering of the steps below is mandated by the hardware; do not
 * reorder without consulting the comments and the PRM.
 */
static void ironlake_crtc_enable(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* FDI link M/N values are only relevant with a PCH encoder. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ironlake_set_pipeconf(new_crtc_state);

	/* Mark the crtc active before enabling encoders and the pipe. */
	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* Program watermarks before the pipe starts scanning out. */
	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ironlake_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6527
6528 /* IPS only exists on ULT machines and is tied to pipe A. */
6529 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6530 {
6531         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6532 }
6533
6534 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6535                                             enum pipe pipe, bool apply)
6536 {
6537         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6538         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6539
6540         if (apply)
6541                 val |= mask;
6542         else
6543                 val &= ~mask;
6544
6545         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6546 }
6547
6548 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6549 {
6550         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6551         enum pipe pipe = crtc->pipe;
6552         u32 val;
6553
6554         val = MBUS_DBOX_A_CREDIT(2);
6555
6556         if (INTEL_GEN(dev_priv) >= 12) {
6557                 val |= MBUS_DBOX_BW_CREDIT(2);
6558                 val |= MBUS_DBOX_B_CREDIT(12);
6559         } else {
6560                 val |= MBUS_DBOX_BW_CREDIT(1);
6561                 val |= MBUS_DBOX_B_CREDIT(8);
6562         }
6563
6564         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6565 }
6566
6567 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6568 {
6569         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6570         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6571         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6572         u32 val;
6573
6574         val = I915_READ(reg);
6575         val &= ~HSW_FRAME_START_DELAY_MASK;
6576         val |= HSW_FRAME_START_DELAY(0);
6577         I915_WRITE(reg, val);
6578 }
6579
/*
 * Full crtc enable sequence for HSW+ (DDI) platforms, including the
 * gen9+/ICL additions (scalers, transcoder port sync, MBus credits)
 * and the GLK/CNL scaler clock gating workaround. Step ordering is
 * hardware-mandated; see the inline comments before reordering.
 */
static void haswell_crtc_enable(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(crtc->active))
		return;

	intel_encoders_pre_pll_enable(state, crtc);

	if (new_crtc_state->shared_dpll)
		intel_enable_shared_dpll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	/* DSI transcoders program their own timings. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_enable_trans_port_sync(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder))
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   new_crtc_state->pixel_multiplier - 1);

	/* FDI link M/N values are only relevant with a PCH encoder. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	if (!transcoder_is_dsi(cpu_transcoder)) {
		hsw_set_frame_start_delay(new_crtc_state);
		haswell_set_pipeconf(new_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Mark the crtc active before enabling the pipe and planes. */
	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(new_crtc_state);
	else
		ironlake_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(new_crtc_state);

	/* Program watermarks before the pipe starts scanning out. */
	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		lpt_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* The WA bits may only be cleared after a vblank has passed. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
6687
6688 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6689 {
6690         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6691         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6692         enum pipe pipe = crtc->pipe;
6693
6694         /* To avoid upsetting the power well on haswell only disable the pfit if
6695          * it's in use. The hw state code will make sure we get this right. */
6696         if (old_crtc_state->pch_pfit.enabled) {
6697                 I915_WRITE(PF_CTL(pipe), 0);
6698                 I915_WRITE(PF_WIN_POS(pipe), 0);
6699                 I915_WRITE(PF_WIN_SZ(pipe), 0);
6700         }
6701 }
6702
/*
 * Full crtc disable sequence for ILK-style PCH platforms: tear down
 * encoders, pipe, pfit and FDI/PCH in the reverse of the enable order,
 * with FIFO underrun reporting suppressed across the transition.
 */
static void ironlake_crtc_disable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	/* PCH transcoder and FDI PLL teardown, plus CPT-specific cleanup. */
	if (old_crtc_state->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(crtc);
	}

	/* Re-arm underrun reporting now that everything is off. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6759
/*
 * Full crtc disable sequence for HSW+ (DDI) platforms: encoders first,
 * then pipe, transcoder port sync and transcoder function, DSC,
 * scalers/pfit, and finally the post-disable/post-pll-disable encoder
 * hooks. Teardown order mirrors the enable path in reverse.
 */
static void haswell_crtc_disable(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_disable_transcoder_port_sync(old_crtc_state);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	intel_dsc_disable(old_crtc_state);

	/* gen9+ uses the pipe scalers; older DDI platforms use the PCH pfit. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(crtc);
	else
		ironlake_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	intel_encoders_post_pll_disable(state, crtc);
}
6793
6794 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6795 {
6796         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6797         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6798
6799         if (!crtc_state->gmch_pfit.control)
6800                 return;
6801
6802         /*
6803          * The panel fitter should only be adjusted whilst the pipe is disabled,
6804          * according to register description and PRM.
6805          */
6806         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6807         assert_pipe_disabled(dev_priv, crtc->pipe);
6808
6809         I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6810         I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6811
6812         /* Border color in case we don't scale up to the full screen. Black by
6813          * default, change to something else for debugging. */
6814         I915_WRITE(BCLRPAT(crtc->pipe), 0);
6815 }
6816
6817 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6818 {
6819         if (phy == PHY_NONE)
6820                 return false;
6821
6822         if (IS_ELKHARTLAKE(dev_priv))
6823                 return phy <= PHY_C;
6824
6825         if (INTEL_GEN(dev_priv) >= 11)
6826                 return phy <= PHY_B;
6827
6828         return false;
6829 }
6830
6831 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6832 {
6833         if (INTEL_GEN(dev_priv) >= 12)
6834                 return phy >= PHY_D && phy <= PHY_I;
6835
6836         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6837                 return phy >= PHY_C && phy <= PHY_F;
6838
6839         return false;
6840 }
6841
6842 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6843 {
6844         if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6845                 return PHY_A;
6846
6847         return (enum phy)port;
6848 }
6849
6850 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6851 {
6852         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6853                 return PORT_TC_NONE;
6854
6855         if (INTEL_GEN(dev_priv) >= 12)
6856                 return port - PORT_D;
6857
6858         return port - PORT_C;
6859 }
6860
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are logged via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
6883
/*
 * Return the power domain needed for AUX transactions on @dig_port.
 * Type-C ports in TBT-alt mode use the dedicated *_TBT AUX domains;
 * all other ports use the regular per-channel AUX domains. Unknown
 * AUX channels are logged via MISSING_CASE and fall back to a sane
 * default for the respective path.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
6929
/*
 * Compute the bitmask of display power domains this CRTC state needs while
 * active: the pipe, its transcoder, the panel fitter (when enabled or
 * force-enabled), each attached encoder's domain, audio (DDI platforms),
 * and the display core when a shared DPLL is referenced.
 *
 * Returns 0 for an inactive CRTC state.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	/* force_thru keeps the pfit domain even without actual pfit use */
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Every encoder driven by this CRTC contributes its own domain. */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	return mask;
}
6963
/*
 * Update crtc->enabled_power_domains for the new CRTC state, acquiring
 * references on any newly required domains.
 *
 * Returns the set of domains that are no longer required; the caller is
 * responsible for releasing those (after the modeset has completed) via
 * modeset_put_power_domains().
 */
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	old_domains = crtc->enabled_power_domains;
	crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc_state);

	/* Grab only the domains we didn't already hold. */
	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	/* Domains to drop once the new state is committed. */
	return old_domains & ~new_domains;
}
6983
/*
 * Release the power-domain references returned by
 * modeset_get_crtc_power_domains() once they are no longer needed.
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}
6992
/*
 * Enable a pipe on VLV/CHV.  The sequence below is order-sensitive:
 * timings/pipeconf are programmed first, then the PLL is prepared and
 * enabled around the encoder pre-enable hooks, and only then is the pipe
 * itself enabled and vblanks turned on.  Do not reorder without checking
 * the platform's mode-set sequence.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7047
/*
 * Program the precomputed FP0/FP1 PLL divider values from the CRTC state
 * into the hardware for this pipe.
 */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
7056
/*
 * Enable a pipe on pre-ILK (gen2-4) platforms.  Order-sensitive HW enable
 * sequence: dividers and timings first, PLL after encoder pre-enable,
 * then pfit/LUTs/watermarks, and finally the pipe itself.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* gen2 has no CPU FIFO underrun reporting */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* older platforms don't provide an initial_watermarks hook */
	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7104
/*
 * Disable the GMCH panel fitter for the old CRTC state.  No-op if the
 * fitter wasn't in use; must only run with the pipe already disabled.
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
		      I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
7119
/*
 * Disable a pipe on pre-ILK (gen2-4) platforms, the inverse of
 * i9xx_crtc_enable(): encoders first, then pipe, pfit, and finally the
 * PLL (unless a DSI encoder still owns it).
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI keeps the PLL running; everything else shuts it down here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* gen2 has no CPU FIFO underrun reporting */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7166
/*
 * Forcibly disable a CRTC outside of a normal atomic commit (used during
 * HW state sanitization at init/resume).  Disables any visible planes,
 * runs the platform crtc_disable hook via a throwaway atomic state, then
 * scrubs all software tracking: CRTC/encoder state, FBC, watermarks,
 * shared DPLL, power-domain references, and the bandwidth/cdclk
 * bookkeeping for the pipe.
 *
 * All relevant locks must already be held via @ctx.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off all planes that are still visible on this CRTC. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* The crtc_disable hook needs an atomic state to operate on. */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	WARN_ON(IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Reset the software state to "CRTC off". */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach all encoders that were routed to this CRTC. */
	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Drop every power-domain reference the CRTC was holding. */
	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(pipe);
	dev_priv->min_cdclk[pipe] = 0;
	dev_priv->min_voltage_level[pipe] = 0;

	/* Clear the bandwidth accounting for this pipe as well. */
	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
7246
/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
7251 int intel_display_suspend(struct drm_device *dev)
7252 {
7253         struct drm_i915_private *dev_priv = to_i915(dev);
7254         struct drm_atomic_state *state;
7255         int ret;
7256
7257         state = drm_atomic_helper_suspend(dev);
7258         ret = PTR_ERR_OR_ZERO(state);
7259         if (ret)
7260                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7261         else
7262                 dev_priv->modeset_restore_state = state;
7263         return ret;
7264 }
7265
/*
 * Generic encoder destroy callback: tear down the DRM encoder core state
 * and free the containing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
7273
/*
 * Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency).
 */
/*
 * Verify a single connector: compare the connector's reported HW state
 * against the atomic state it should be tracking, emitting
 * I915_STATE_WARNs on any mismatch.  @crtc_state may be NULL for a
 * connector that should be disabled.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST encoders are virtual; skip the encoder cross-checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7312
7313 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7314 {
7315         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7316                 return crtc_state->fdi_lanes;
7317
7318         return 0;
7319 }
7320
/*
 * Validate an FDI lane configuration for @pipe.  Beyond the global 4-lane
 * limit (2 on HSW/BDW), Ivybridge's three pipes share FDI bandwidth:
 * pipe B using more than 2 lanes steals pipe C's lanes, so the sibling
 * pipe's state must be pulled into the atomic state and cross-checked.
 *
 * Returns 0 if the config is valid, -EINVAL if not, or -EDEADLK (via
 * intel_atomic_get_crtc_state) if lock acquisition must be retried.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe platforms have no FDI lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B requires pipe C's FDI link to be idle. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C can only run if pipe B isn't using >2 lanes. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7392
#define RETRY 1
/*
 * Compute the FDI link configuration (lane count and M/N values) for a
 * PCH-attached pipe.  If the requested bpp doesn't fit the available FDI
 * bandwidth, the pipe bpp is reduced in steps of 6 (2 bits/component) and
 * the computation retried; RETRY is returned so the caller re-runs the
 * full config computation with the reduced bpp.
 *
 * Returns 0 on success, RETRY when bpp was lowered, or a negative error
 * code (-EDEADLK must be propagated for lock back-off).
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	/* 6*3 = 18 bpp is the minimum we'll degrade to. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7441
/*
 * Whether this CRTC state could use IPS (Intermediate Pixel Storage) at
 * all: HSW/BDW ULT pipe A only, respecting the enable_ips modparam, max
 * 24 bpp, and on BDW a pixel rate below 95% of the *maximum* cdclk.
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!i915_modparams.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
7470
/*
 * Decide whether IPS should actually be enabled for this CRTC state:
 * requires basic IPS capability, no CRC capture in progress, at least one
 * non-cursor plane, and (BDW) a pixel rate below 95% of the *logical*
 * cdclk chosen for this atomic state.
 */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->uapi.state);

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return false;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return false;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return false;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
		return false;

	return true;
}
7501
7502 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7503 {
7504         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7505
7506         /* GDG double wide on either pipe, otherwise pipe A only */
7507         return INTEL_GEN(dev_priv) < 4 &&
7508                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7509 }
7510
/*
 * Compute the effective pipe pixel rate on ILK+ style hardware.  When the
 * PCH panel fitter downscales, the pipe has to fetch pixels faster than
 * the mode clock, so the rate is scaled by the source/destination area
 * ratio (only when the pfit actually shrinks the image).
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	u32 pixel_rate;

	pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (pipe_config->pch_pfit.enabled) {
		u64 pipe_w, pipe_h, pfit_w, pfit_h;
		u32 pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		/* pfit.size packs width in the high 16 bits, height in the low */
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		/* only upscaling from src->dst matters; clamp to pfit size */
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		/* guard against a zero-sized pfit destination (div by zero) */
		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
7545
7546 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7547 {
7548         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7549
7550         if (HAS_GMCH(dev_priv))
7551                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7552                 crtc_state->pixel_rate =
7553                         crtc_state->hw.adjusted_mode.crtc_clock;
7554         else
7555                 crtc_state->pixel_rate =
7556                         ilk_pipe_pixel_rate(crtc_state);
7557 }
7558
/*
 * Validate and finalize platform-independent parts of a CRTC config:
 * dot-clock limits (enabling double wide mode on old platforms when
 * needed), YCbCr-vs-CTM conflicts, even-width requirements, the Cantiga+
 * hsync front porch erratum, the derived pixel rate, and — for PCH
 * encoders — the FDI configuration.
 *
 * Returns 0 on success or a negative error code (may return RETRY /
 * -EDEADLK from the FDI computation).
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* pre-gen4 single wide is limited to 90% of cdclk */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
7632
7633 static void
7634 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7635 {
7636         while (*num > DATA_LINK_M_N_MASK ||
7637                *den > DATA_LINK_M_N_MASK) {
7638                 *num >>= 1;
7639                 *den >>= 1;
7640         }
7641 }
7642
/*
 * Compute a hardware M/N value pair for the ratio m/n.  With
 * @constant_n, N is pinned to 0x8000 (see comment below); otherwise N is
 * the next power of two >= n, capped at DATA_LINK_N_MAX.  M is then
 * derived to preserve the ratio, and both are reduced to fit the
 * register fields.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Give N value as 0x8000 that
	 * should be acceptable by specific devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices expect also in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = 0x8000;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	/* 64-bit intermediate to avoid overflow of m * ret_n */
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
7662
/*
 * Fill in the GMCH (data) and link M/N values for a display link.  The
 * data M/N is the ratio of the stream's data rate to the total link
 * bandwidth (link_clock * nlanes * 8); the link M/N is the ratio of
 * pixel clock to link clock.  With @fec_enable the data rate is inflated
 * to account for DP Forward Error Correction overhead.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	/* TU (transfer unit) size is fixed at 64 here */
	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
7684
/*
 * On IBX/CPT PCH platforms, override the VBT's LVDS SSC setting with
 * whatever the BIOS actually programmed, since the BIOS-chosen state is
 * known to work and keeps the panel flicker-free.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
				      enableddisabled(bios_lvds_use_ssc),
				      enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
7705
7706 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7707 {
7708         if (i915_modparams.panel_use_ssc >= 0)
7709                 return i915_modparams.panel_use_ssc != 0;
7710         return dev_priv->vbt.lvds_use_ssc
7711                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7712 }
7713
7714 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7715 {
7716         return (1 << dpll->n) << 16 | dpll->m2;
7717 }
7718
7719 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7720 {
7721         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7722 }
7723
7724 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7725                                      struct intel_crtc_state *crtc_state,
7726                                      struct dpll *reduced_clock)
7727 {
7728         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7729         u32 fp, fp2 = 0;
7730
7731         if (IS_PINEVIEW(dev_priv)) {
7732                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7733                 if (reduced_clock)
7734                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7735         } else {
7736                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7737                 if (reduced_clock)
7738                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7739         }
7740
7741         crtc_state->dpll_hw_state.fp0 = fp;
7742
7743         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7744             reduced_clock) {
7745                 crtc_state->dpll_hw_state.fp1 = fp2;
7746         } else {
7747                 crtc_state->dpll_hw_state.fp1 = fp;
7748         }
7749 }
7750
/*
 * Recalibrate the PLL B opamp via DPIO sideband writes.
 *
 * The caller holds the DPIO lock (see vlv_prepare_pll(), which calls
 * vlv_dpio_get() before invoking this for pipe B).
 *
 * NOTE(review): the magic register values below come from the hardware
 * programming notes — their exact semantics are not derivable from this
 * file; confirm against the VLV DPIO documentation before changing.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* Update the top byte of the reference control word. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte of the opamp register again. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* And program the final reference control top byte. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7779
/*
 * Program the PCH transcoder data/link M/N divider registers for the
 * crtc's pipe from the precomputed values in @m_n.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The TU size shares the DATA_M register with the data M value. */
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7792
7793 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7794                                  enum transcoder transcoder)
7795 {
7796         if (IS_HASWELL(dev_priv))
7797                 return transcoder == TRANSCODER_EDP;
7798
7799         /*
7800          * Strictly speaking some registers are available before
7801          * gen7, but we only support DRRS on gen7+
7802          */
7803         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7804 }
7805
/*
 * Program the CPU transcoder data/link M/N dividers. Gen5+ uses
 * per-transcoder registers (with optional M2/N2 for DRRS); older g4x
 * hardware uses per-pipe registers and has no M2/N2 set.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7839
/*
 * Program the DP link M/N values selected by @m_n into the hardware.
 *
 * M1_N1 programs the primary dividers (plus M2/N2 where the transcoder
 * has them); M2_N2 programs the m2_n2 dividers into the M1_N1 registers
 * on platforms without dedicated M2_N2 registers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n, ignoring the
	 * M2_N2 selection made above — presumably fine because the M2_N2
	 * path is only used for DRRS, but worth confirming against the
	 * callers before relying on it.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
7864
7865 static void vlv_compute_dpll(struct intel_crtc *crtc,
7866                              struct intel_crtc_state *pipe_config)
7867 {
7868         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7869                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7870         if (crtc->pipe != PIPE_A)
7871                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7872
7873         /* DPLL not used with DSI, but still need the rest set up */
7874         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7875                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7876                         DPLL_EXT_BUFFER_ENABLE_VLV;
7877
7878         pipe_config->dpll_hw_state.dpll_md =
7879                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7880 }
7881
7882 static void chv_compute_dpll(struct intel_crtc *crtc,
7883                              struct intel_crtc_state *pipe_config)
7884 {
7885         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7886                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7887         if (crtc->pipe != PIPE_A)
7888                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7889
7890         /* DPLL not used with DSI, but still need the rest set up */
7891         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7892                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7893
7894         pipe_config->dpll_hw_state.dpll_md =
7895                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7896 }
7897
/*
 * Program the VLV DPLL dividers and analog tuning values through the
 * DPIO sideband from the precomputed state in @pipe_config. Only the
 * reference clock is enabled in the DPLL control register here; the
 * VCO is enabled separately (see e.g. vlv_force_pll_on()).
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Everything below goes through the DPIO sideband interface. */
	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write the dividers a second time with calibration enabled. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock: preserve bits 8-15, set the DP bit when needed. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
7997
/*
 * Program the CHV DPLL dividers, loop filter and lock-detect settings
 * through the DPIO sideband from the precomputed state in @pipe_config.
 * Only the refclk/SSC portion of the DPLL control register is enabled
 * here; the VCO is enabled separately (see e.g. vlv_force_pll_on()).
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* CHV M2 is split into a 22-bit fraction and an integer part. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients are picked by VCO frequency band. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8102
8103 /**
8104  * vlv_force_pll_on - forcibly enable just the PLL
8105  * @dev_priv: i915 private structure
8106  * @pipe: pipe PLL to enable
8107  * @dpll: PLL configuration
8108  *
8109  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8110  * in cases where we need the PLL enabled even when @pipe is not going to
8111  * be enabled.
8112  */
8113 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8114                      const struct dpll *dpll)
8115 {
8116         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8117         struct intel_crtc_state *pipe_config;
8118
8119         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8120         if (!pipe_config)
8121                 return -ENOMEM;
8122
8123         pipe_config->uapi.crtc = &crtc->base;
8124         pipe_config->pixel_multiplier = 1;
8125         pipe_config->dpll = *dpll;
8126
8127         if (IS_CHERRYVIEW(dev_priv)) {
8128                 chv_compute_dpll(crtc, pipe_config);
8129                 chv_prepare_pll(crtc, pipe_config);
8130                 chv_enable_pll(crtc, pipe_config);
8131         } else {
8132                 vlv_compute_dpll(crtc, pipe_config);
8133                 vlv_prepare_pll(crtc, pipe_config);
8134                 vlv_enable_pll(crtc, pipe_config);
8135         }
8136
8137         kfree(pipe_config);
8138
8139         return 0;
8140 }
8141
8142 /**
8143  * vlv_force_pll_off - forcibly disable just the PLL
8144  * @dev_priv: i915 private structure
8145  * @pipe: pipe PLL to disable
8146  *
8147  * Disable the PLL for @pipe. To be used in cases where we need
8148  * the PLL enabled even when @pipe is not going to be enabled.
8149  */
8150 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8151 {
8152         if (IS_CHERRYVIEW(dev_priv))
8153                 chv_disable_pll(dev_priv, pipe);
8154         else
8155                 vlv_disable_pll(dev_priv, pipe);
8156 }
8157
/*
 * Compute the i9xx-style DPLL control register value from the divider
 * settings in crtc_state->dpll and store it (plus DPLL_MD on gen4+) in
 * crtc_state->dpll_hw_state.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Fill in the FP0/FP1 divider values first. */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/*
	 * On these platforms the pixel multiplier lives in the DPLL
	 * register itself; gen4+ uses the separate DPLL_MD below.
	 */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP uses the same high speed clock enable bit as SDVO/HDMI. */
	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* G4x also programs the reduced clock's P1 into FPA1. */
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock: TV clock, SSC for LVDS panels, or the default. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* Gen4+ carries the pixel multiplier in the separate DPLL_MD. */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8230
/*
 * Compute the gen2 (i8xx) DPLL control register value from the divider
 * settings in crtc_state->dpll and store it in crtc_state->dpll_hw_state.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Fill in the FP0/FP1 divider values first. */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* LVDS and non-LVDS encode the P1/P2 post dividers differently. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* SSC reference only for LVDS panels configured to use it. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8280
/*
 * Program the pipe/transcoder timing registers (h/v total, blank, sync)
 * from crtc_state->hw.adjusted_mode. All registers take value-1.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO and other outputs compute the halfline shift differently. */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT is only written on gen4+. */
	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
8342
8343 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8344 {
8345         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8346         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8347         enum pipe pipe = crtc->pipe;
8348
8349         /* pipesrc controls the size that is scaled from, which should
8350          * always be the user's requested size.
8351          */
8352         I915_WRITE(PIPESRC(pipe),
8353                    ((crtc_state->pipe_src_w - 1) << 16) |
8354                    (crtc_state->pipe_src_h - 1));
8355 }
8356
8357 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8358 {
8359         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8360         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8361
8362         if (IS_GEN(dev_priv, 2))
8363                 return false;
8364
8365         if (INTEL_GEN(dev_priv) >= 9 ||
8366             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8367                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8368         else
8369                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8370 }
8371
/*
 * Read the pipe/transcoder timings back from the hardware into
 * pipe_config->hw.adjusted_mode. The registers hold value-1, so one is
 * added back on each field.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	/* Blank timings are not read back for DSI transcoders. */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = I915_READ(HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = I915_READ(VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Undo the halfline adjustment made on the set path for interlace. */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
8416
8417 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8418                                     struct intel_crtc_state *pipe_config)
8419 {
8420         struct drm_device *dev = crtc->base.dev;
8421         struct drm_i915_private *dev_priv = to_i915(dev);
8422         u32 tmp;
8423
8424         tmp = I915_READ(PIPESRC(crtc->pipe));
8425         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8426         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8427
8428         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8429         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8430 }
8431
8432 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8433                                  struct intel_crtc_state *pipe_config)
8434 {
8435         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8436         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8437         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8438         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8439
8440         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8441         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8442         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8443         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8444
8445         mode->flags = pipe_config->hw.adjusted_mode.flags;
8446         mode->type = DRM_MODE_TYPE_DRIVER;
8447
8448         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8449
8450         mode->hsync = drm_mode_hsync(mode);
8451         mode->vrefresh = drm_mode_vrefresh(mode);
8452         drm_mode_set_name(mode);
8453 }
8454
8455 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8456 {
8457         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8458         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8459         u32 pipeconf;
8460
8461         pipeconf = 0;
8462
8463         /* we keep both pipes enabled on 830 */
8464         if (IS_I830(dev_priv))
8465                 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8466
8467         if (crtc_state->double_wide)
8468                 pipeconf |= PIPECONF_DOUBLE_WIDE;
8469
8470         /* only g4x and later have fancy bpc/dither controls */
8471         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8472             IS_CHERRYVIEW(dev_priv)) {
8473                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8474                 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8475                         pipeconf |= PIPECONF_DITHER_EN |
8476                                     PIPECONF_DITHER_TYPE_SP;
8477
8478                 switch (crtc_state->pipe_bpp) {
8479                 case 18:
8480                         pipeconf |= PIPECONF_6BPC;
8481                         break;
8482                 case 24:
8483                         pipeconf |= PIPECONF_8BPC;
8484                         break;
8485                 case 30:
8486                         pipeconf |= PIPECONF_10BPC;
8487                         break;
8488                 default:
8489                         /* Case prevented by intel_choose_pipe_bpp_dither. */
8490                         BUG();
8491                 }
8492         }
8493
8494         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8495                 if (INTEL_GEN(dev_priv) < 4 ||
8496                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8497                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8498                 else
8499                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8500         } else {
8501                 pipeconf |= PIPECONF_PROGRESSIVE;
8502         }
8503
8504         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8505              crtc_state->limited_color_range)
8506                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8507
8508         pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8509
8510         pipeconf |= PIPECONF_FRAME_START_DELAY(0);
8511
8512         I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8513         POSTING_READ(PIPECONF(crtc->pipe));
8514 }
8515
8516 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8517                                    struct intel_crtc_state *crtc_state)
8518 {
8519         struct drm_device *dev = crtc->base.dev;
8520         struct drm_i915_private *dev_priv = to_i915(dev);
8521         const struct intel_limit *limit;
8522         int refclk = 48000;
8523
8524         memset(&crtc_state->dpll_hw_state, 0,
8525                sizeof(crtc_state->dpll_hw_state));
8526
8527         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8528                 if (intel_panel_use_ssc(dev_priv)) {
8529                         refclk = dev_priv->vbt.lvds_ssc_freq;
8530                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8531                 }
8532
8533                 limit = &intel_limits_i8xx_lvds;
8534         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8535                 limit = &intel_limits_i8xx_dvo;
8536         } else {
8537                 limit = &intel_limits_i8xx_dac;
8538         }
8539
8540         if (!crtc_state->clock_set &&
8541             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8542                                  refclk, NULL, &crtc_state->dpll)) {
8543                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8544                 return -EINVAL;
8545         }
8546
8547         i8xx_compute_dpll(crtc, crtc_state, NULL);
8548
8549         return 0;
8550 }
8551
8552 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8553                                   struct intel_crtc_state *crtc_state)
8554 {
8555         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8556         const struct intel_limit *limit;
8557         int refclk = 96000;
8558
8559         memset(&crtc_state->dpll_hw_state, 0,
8560                sizeof(crtc_state->dpll_hw_state));
8561
8562         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8563                 if (intel_panel_use_ssc(dev_priv)) {
8564                         refclk = dev_priv->vbt.lvds_ssc_freq;
8565                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8566                 }
8567
8568                 if (intel_is_dual_link_lvds(dev_priv))
8569                         limit = &intel_limits_g4x_dual_channel_lvds;
8570                 else
8571                         limit = &intel_limits_g4x_single_channel_lvds;
8572         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8573                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8574                 limit = &intel_limits_g4x_hdmi;
8575         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8576                 limit = &intel_limits_g4x_sdvo;
8577         } else {
8578                 /* The option is for other outputs */
8579                 limit = &intel_limits_i9xx_sdvo;
8580         }
8581
8582         if (!crtc_state->clock_set &&
8583             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8584                                 refclk, NULL, &crtc_state->dpll)) {
8585                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8586                 return -EINVAL;
8587         }
8588
8589         i9xx_compute_dpll(crtc, crtc_state, NULL);
8590
8591         return 0;
8592 }
8593
8594 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8595                                   struct intel_crtc_state *crtc_state)
8596 {
8597         struct drm_device *dev = crtc->base.dev;
8598         struct drm_i915_private *dev_priv = to_i915(dev);
8599         const struct intel_limit *limit;
8600         int refclk = 96000;
8601
8602         memset(&crtc_state->dpll_hw_state, 0,
8603                sizeof(crtc_state->dpll_hw_state));
8604
8605         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8606                 if (intel_panel_use_ssc(dev_priv)) {
8607                         refclk = dev_priv->vbt.lvds_ssc_freq;
8608                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8609                 }
8610
8611                 limit = &intel_limits_pineview_lvds;
8612         } else {
8613                 limit = &intel_limits_pineview_sdvo;
8614         }
8615
8616         if (!crtc_state->clock_set &&
8617             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8618                                 refclk, NULL, &crtc_state->dpll)) {
8619                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8620                 return -EINVAL;
8621         }
8622
8623         i9xx_compute_dpll(crtc, crtc_state, NULL);
8624
8625         return 0;
8626 }
8627
8628 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8629                                    struct intel_crtc_state *crtc_state)
8630 {
8631         struct drm_device *dev = crtc->base.dev;
8632         struct drm_i915_private *dev_priv = to_i915(dev);
8633         const struct intel_limit *limit;
8634         int refclk = 96000;
8635
8636         memset(&crtc_state->dpll_hw_state, 0,
8637                sizeof(crtc_state->dpll_hw_state));
8638
8639         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8640                 if (intel_panel_use_ssc(dev_priv)) {
8641                         refclk = dev_priv->vbt.lvds_ssc_freq;
8642                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8643                 }
8644
8645                 limit = &intel_limits_i9xx_lvds;
8646         } else {
8647                 limit = &intel_limits_i9xx_sdvo;
8648         }
8649
8650         if (!crtc_state->clock_set &&
8651             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8652                                  refclk, NULL, &crtc_state->dpll)) {
8653                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8654                 return -EINVAL;
8655         }
8656
8657         i9xx_compute_dpll(crtc, crtc_state, NULL);
8658
8659         return 0;
8660 }
8661
8662 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8663                                   struct intel_crtc_state *crtc_state)
8664 {
8665         int refclk = 100000;
8666         const struct intel_limit *limit = &intel_limits_chv;
8667
8668         memset(&crtc_state->dpll_hw_state, 0,
8669                sizeof(crtc_state->dpll_hw_state));
8670
8671         if (!crtc_state->clock_set &&
8672             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8673                                 refclk, NULL, &crtc_state->dpll)) {
8674                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8675                 return -EINVAL;
8676         }
8677
8678         chv_compute_dpll(crtc, crtc_state);
8679
8680         return 0;
8681 }
8682
8683 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8684                                   struct intel_crtc_state *crtc_state)
8685 {
8686         int refclk = 100000;
8687         const struct intel_limit *limit = &intel_limits_vlv;
8688
8689         memset(&crtc_state->dpll_hw_state, 0,
8690                sizeof(crtc_state->dpll_hw_state));
8691
8692         if (!crtc_state->clock_set &&
8693             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8694                                 refclk, NULL, &crtc_state->dpll)) {
8695                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8696                 return -EINVAL;
8697         }
8698
8699         vlv_compute_dpll(crtc, crtc_state);
8700
8701         return 0;
8702 }
8703
8704 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8705 {
8706         if (IS_I830(dev_priv))
8707                 return false;
8708
8709         return INTEL_GEN(dev_priv) >= 4 ||
8710                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8711 }
8712
8713 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8714                                  struct intel_crtc_state *pipe_config)
8715 {
8716         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8717         u32 tmp;
8718
8719         if (!i9xx_has_pfit(dev_priv))
8720                 return;
8721
8722         tmp = I915_READ(PFIT_CONTROL);
8723         if (!(tmp & PFIT_ENABLE))
8724                 return;
8725
8726         /* Check whether the pfit is attached to our pipe. */
8727         if (INTEL_GEN(dev_priv) < 4) {
8728                 if (crtc->pipe != PIPE_B)
8729                         return;
8730         } else {
8731                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8732                         return;
8733         }
8734
8735         pipe_config->gmch_pfit.control = tmp;
8736         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8737 }
8738
8739 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8740                                struct intel_crtc_state *pipe_config)
8741 {
8742         struct drm_device *dev = crtc->base.dev;
8743         struct drm_i915_private *dev_priv = to_i915(dev);
8744         enum pipe pipe = crtc->pipe;
8745         struct dpll clock;
8746         u32 mdiv;
8747         int refclk = 100000;
8748
8749         /* In case of DSI, DPLL will not be used */
8750         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8751                 return;
8752
8753         vlv_dpio_get(dev_priv);
8754         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8755         vlv_dpio_put(dev_priv);
8756
8757         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8758         clock.m2 = mdiv & DPIO_M2DIV_MASK;
8759         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8760         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8761         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8762
8763         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8764 }
8765
/*
 * Reconstruct the framebuffer configuration the firmware/BIOS left on
 * the primary plane of @crtc, filling in @plane_config (including an
 * allocated struct intel_framebuffer in plane_config->fb).
 *
 * Returns silently (leaving @plane_config untouched or partially
 * filled) if the plane is disabled or the fb allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane isn't enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	/* Decode tiling/rotation from the plane control register. */
	val = I915_READ(DSPCNTR(i9xx_plane));

	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B planes additionally support horizontal mirroring. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/*
	 * The surface base/offset registers differ per generation:
	 * HSW/BDW use DSPOFFSET, gen4+ use DSPTILEOFF/DSPLINOFF depending
	 * on tiling, and older gens have only DSPADDR.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* Width/height come from the pipe source size (stored as size-1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8848
/*
 * Read back the CHV DPLL dividers via sideband and compute the
 * resulting port clock into @pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* All DPIO reads done under one sideband acquire/release pair. */
	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/*
	 * m2 is split across registers: integer part in PLL_DW0, and the
	 * 22-bit fractional part in PLL_DW2 when fractional divide is on.
	 */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8882
8883 static enum intel_output_format
8884 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
8885 {
8886         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8887         u32 tmp;
8888
8889         tmp = I915_READ(PIPEMISC(crtc->pipe));
8890
8891         if (tmp & PIPEMISC_YUV420_ENABLE) {
8892                 /* We support 4:2:0 in full blend mode only */
8893                 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
8894
8895                 return INTEL_OUTPUT_FORMAT_YCBCR420;
8896         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8897                 return INTEL_OUTPUT_FORMAT_YCBCR444;
8898         } else {
8899                 return INTEL_OUTPUT_FORMAT_RGB;
8900         }
8901 }
8902
8903 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8904 {
8905         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8906         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8907         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8908         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8909         u32 tmp;
8910
8911         tmp = I915_READ(DSPCNTR(i9xx_plane));
8912
8913         if (tmp & DISPPLANE_GAMMA_ENABLE)
8914                 crtc_state->gamma_enable = true;
8915
8916         if (!HAS_GMCH(dev_priv) &&
8917             tmp & DISPPLANE_PIPE_CSC_ENABLE)
8918                 crtc_state->csc_enable = true;
8919 }
8920
/*
 * Read out the full hardware state of a gmch pipe into @pipe_config.
 *
 * Returns true if the pipe is powered and enabled and the state was
 * read, false otherwise. Holds a display power wakeref for the pipe's
 * power domain for the duration of the readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Can't touch the registers if the pipe's power well is down. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;
	pipe_config->master_transcoder = INVALID_TRANSCODER;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* bpc is only encoded in PIPECONF on g4x/vlv/chv. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier readout is generation-dependent. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
9039
/*
 * Initialize the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class PCHs.
 *
 * First computes the desired final register value from the connected
 * outputs (LVDS/eDP presence, CK505 clock source, SSC usage) and bails
 * early if no change is needed; otherwise steps the hardware through
 * the required enable/disable sequence, with a udelay(200) settle after
 * each write.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX the external CK505 availability comes from the VBT. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Hardware already matches the desired state: nothing to do. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Keep SSC running if another DPLL still depends on it. */
		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepwise sequence must land exactly on the precomputed value. */
	BUG_ON(val != final);
}
9206
9207 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
9208 {
9209         u32 tmp;
9210
9211         tmp = I915_READ(SOUTH_CHICKEN2);
9212         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
9213         I915_WRITE(SOUTH_CHICKEN2, tmp);
9214
9215         if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
9216                         FDI_MPHY_IOSFSB_RESET_STATUS, 100))
9217                 DRM_ERROR("FDI mPHY reset assert timeout\n");
9218
9219         tmp = I915_READ(SOUTH_CHICKEN2);
9220         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
9221         I915_WRITE(SOUTH_CHICKEN2, tmp);
9222
9223         if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
9224                          FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
9225                 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
9226 }
9227
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers through the sideband (SBI_MPHY)
 * interface.  Each step is a read-modify-write of one mPHY register; the
 * addresses and values come from the workaround above and are applied in
 * 0x20xx/0x21xx pairs (presumably one per FDI channel -- not documented
 * here, so treat them as opaque).  The exact order is part of the
 * workaround and must not be changed.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9302
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize the combination: FDI needs spread, LP PCH has no FDI. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* All sideband accesses below must be serialized. */
	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block, but keep the alternate-path bit set. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clear the alternate-path bit when spread is requested. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		/* FDI additionally needs the mPHY reset + programming. */
		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable config lives in a different SBI reg on LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9347
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Clear the buffer-enable config bit (inverse of the enable path). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/*
	 * Then disable the SSC block, if it wasn't already, taking the
	 * alternate-path detour first when that bit isn't set yet.
	 */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9373
/* Map a bend amount in steps of 5 (-50..50) to an array index 0..20. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE values for each supported bend amount, indexed via
 * BEND_IDX().  Adjacent step values share an entry; the odd multiples
 * of 5 are additionally dithered via SSCDITHPHASE (see
 * lpt_bend_clkout_dp()).
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9399
9400 /*
9401  * Bend CLKOUT_DP
9402  * steps -50 to 50 inclusive, in steps of 5
9403  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9404  * change in clock period = -(steps / 10) * 5.787 ps
9405  */
9406 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9407 {
9408         u32 tmp;
9409         int idx = BEND_IDX(steps);
9410
9411         if (WARN_ON(steps % 5 != 0))
9412                 return;
9413
9414         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9415                 return;
9416
9417         mutex_lock(&dev_priv->sb_lock);
9418
9419         if (steps % 10 != 0)
9420                 tmp = 0xAAAAAAAB;
9421         else
9422                 tmp = 0x00000000;
9423         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9424
9425         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9426         tmp &= 0xffff0000;
9427         tmp |= sscdivintphase[idx];
9428         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9429
9430         mutex_unlock(&dev_priv->sb_lock);
9431 }
9432
9433 #undef BEND_IDX
9434
9435 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9436 {
9437         u32 fuse_strap = I915_READ(FUSE_STRAP);
9438         u32 ctl = I915_READ(SPLL_CTL);
9439
9440         if ((ctl & SPLL_PLL_ENABLE) == 0)
9441                 return false;
9442
9443         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9444             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9445                 return true;
9446
9447         if (IS_BROADWELL(dev_priv) &&
9448             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9449                 return true;
9450
9451         return false;
9452 }
9453
9454 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9455                                enum intel_dpll_id id)
9456 {
9457         u32 fuse_strap = I915_READ(FUSE_STRAP);
9458         u32 ctl = I915_READ(WRPLL_CTL(id));
9459
9460         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9461                 return false;
9462
9463         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9464                 return true;
9465
9466         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9467             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9468             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9469                 return true;
9470
9471         return false;
9472 }
9473
9474 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9475 {
9476         struct intel_encoder *encoder;
9477         bool has_fdi = false;
9478
9479         for_each_intel_encoder(&dev_priv->drm, encoder) {
9480                 switch (encoder->type) {
9481                 case INTEL_OUTPUT_ANALOG:
9482                         has_fdi = true;
9483                         break;
9484                 default:
9485                         break;
9486                 }
9487         }
9488
9489         /*
9490          * The BIOS may have decided to use the PCH SSC
9491          * reference so we must not disable it until the
9492          * relevant PLLs have stopped relying on it. We'll
9493          * just leave the PCH SSC reference enabled in case
9494          * any active PLL is using it. It will get disabled
9495          * after runtime suspend if we don't have FDI.
9496          *
9497          * TODO: Move the whole reference clock handling
9498          * to the modeset sequence proper so that we can
9499          * actually enable/disable/reconfigure these things
9500          * safely. To do that we need to introduce a real
9501          * clock hierarchy. That would also allow us to do
9502          * clock bending finally.
9503          */
9504         dev_priv->pch_ssc_use = 0;
9505
9506         if (spll_uses_pch_ssc(dev_priv)) {
9507                 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9508                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9509         }
9510
9511         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9512                 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9513                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9514         }
9515
9516         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9517                 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9518                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9519         }
9520
9521         if (dev_priv->pch_ssc_use)
9522                 return;
9523
9524         if (has_fdi) {
9525                 lpt_bend_clkout_dp(dev_priv, 0);
9526                 lpt_enable_clkout_dp(dev_priv, true, true);
9527         } else {
9528                 lpt_disable_clkout_dp(dev_priv);
9529         }
9530 }
9531
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	/* IBX/CPT and LPT PCHs use different refclk setup sequences. */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ironlake_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
9542
/*
 * Program PIPECONF for an ILK-style pipe: bits per component, dithering,
 * interlace mode, color range/colorspace selection and gamma mode, all
 * derived from the committed crtc state.
 */
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	WARN_ON(crtc_state->limited_color_range &&
		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	/* Any non-RGB output format gets the YUV709 colorspace bit. */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(0);

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
9598
9599 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9600 {
9601         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9602         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9603         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9604         u32 val = 0;
9605
9606         if (IS_HASWELL(dev_priv) && crtc_state->dither)
9607                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9608
9609         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9610                 val |= PIPECONF_INTERLACED_ILK;
9611         else
9612                 val |= PIPECONF_PROGRESSIVE;
9613
9614         if (IS_HASWELL(dev_priv) &&
9615             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9616                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9617
9618         I915_WRITE(PIPECONF(cpu_transcoder), val);
9619         POSTING_READ(PIPECONF(cpu_transcoder));
9620 }
9621
/*
 * Program PIPEMISC on BDW+: dither depth and enable, YUV output
 * colorspace, YCbCr 4:2:0 mode and (gen11+) HDR precision mode.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * Gen11+: enable HDR precision mode when every active plane is
	 * either in icl_hdr_plane_mask() or the cursor.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	I915_WRITE(PIPEMISC(crtc->pipe), val);
}
9664
9665 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9666 {
9667         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9668         u32 tmp;
9669
9670         tmp = I915_READ(PIPEMISC(crtc->pipe));
9671
9672         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9673         case PIPEMISC_DITHER_6_BPC:
9674                 return 18;
9675         case PIPEMISC_DITHER_8_BPC:
9676                 return 24;
9677         case PIPEMISC_DITHER_10_BPC:
9678                 return 30;
9679         case PIPEMISC_DITHER_12_BPC:
9680                 return 36;
9681         default:
9682                 MISSING_CASE(tmp);
9683                 return 0;
9684         }
9685 }
9686
/*
 * Compute how many FDI lanes are required to carry @target_clock pixels
 * at @bpp bits per pixel over a link running at @link_bw.
 *
 * Account for spread spectrum to avoid oversubscribing the link:
 * max center spread is 2.5%, so budget 5% for safety's sake.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int lane_bw = link_bw * 8;

	/* Round up: a partially used lane is still a whole lane. */
	return (bps + lane_bw - 1) / lane_bw;
}
9697
/*
 * Decide whether FP_CB_TUNE should be set for this divider: true when
 * the effective M divider is below @factor times N (the caller,
 * ironlake_compute_dpll(), picks @factor per output type).
 */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
9702
/*
 * Compute the ILK/SNB/IVB PCH DPLL register values (DPLL, FP0, FP1) for
 * the given crtc state and optional reduced (downclocked) divider, and
 * store them in crtc_state->dpll_hw_state.
 */
static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 holds the reduced-clock divider, or a copy of FP0 without one. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* The multiplier field is stored as (multiplier - 1). */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for LVDS panels that requested it. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9804
/*
 * Compute the DPLL settings for an ILK-style pipe and reserve a shared
 * DPLL for it.  Returns 0 on success, -EINVAL if no suitable divider
 * or no free shared PLL could be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS may use the (VBT-provided) SSC reference instead. */
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* Single/dual link and 100MHz refclk each have own limits. */
		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Honor a user/driver-provided divider, otherwise search for one. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
			      pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
9860
/*
 * Read back the link and data M1/N1 values from the PCH transcoder
 * registers for @crtc's pipe.  The TU size shares a register with the
 * data M value and is extracted separately.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* TU size is stored as (tu - 1) in the register. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9876
/*
 * Read back link/data M/N values for a CPU transcoder.  On gen5+ the
 * values live in per-transcoder registers, with an optional second
 * M2/N2 set where the transcoder supports it (@m2_n2 may be NULL to
 * skip it); older platforms use the G4X-style per-pipe registers.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size is stored as (tu - 1) in the register. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9913
9914 void intel_dp_get_m_n(struct intel_crtc *crtc,
9915                       struct intel_crtc_state *pipe_config)
9916 {
9917         if (pipe_config->has_pch_encoder)
9918                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9919         else
9920                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9921                                              &pipe_config->dp_m_n,
9922                                              &pipe_config->dp_m2_n2);
9923 }
9924
/* Read back the FDI link M/N configuration from the CPU transcoder. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9931
9932 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9933                                     struct intel_crtc_state *pipe_config)
9934 {
9935         struct drm_device *dev = crtc->base.dev;
9936         struct drm_i915_private *dev_priv = to_i915(dev);
9937         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9938         u32 ps_ctrl = 0;
9939         int id = -1;
9940         int i;
9941
9942         /* find scaler attached to this pipe */
9943         for (i = 0; i < crtc->num_scalers; i++) {
9944                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9945                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9946                         id = i;
9947                         pipe_config->pch_pfit.enabled = true;
9948                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9949                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9950                         scaler_state->scalers[i].in_use = true;
9951                         break;
9952                 }
9953         }
9954
9955         scaler_state->scaler_id = id;
9956         if (id >= 0) {
9957                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9958         } else {
9959                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9960         }
9961 }
9962
/*
 * Read back the primary plane's current hw state at driver load so the
 * firmware/BIOS-programmed framebuffer can be inherited (flicker-free
 * boot).  On success a framebuffer describing the hw layout is stored
 * in @plane_config; if the readout hits an unknown tiling mode the
 * partially-filled fb is freed and @plane_config->fb is left unset.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
                                 struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum plane_id plane_id = plane->id;
        enum pipe pipe;
        u32 val, base, offset, stride_mult, tiling, alpha;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to inherit if the plane is disabled. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = I915_READ(PLANE_CTL(pipe, plane_id));

        /* The pixel format field layout changed on gen11. */
        if (INTEL_GEN(dev_priv) >= 11)
                pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
        else
                pixel_format = val & PLANE_CTL_FORMAT_MASK;

        /* Alpha mode moved from PLANE_CTL to PLANE_COLOR_CTL on glk/gen10+. */
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
                alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
                alpha &= PLANE_COLOR_ALPHA_MASK;
        } else {
                alpha = val & PLANE_CTL_ALPHA_MASK;
        }

        fourcc = skl_format_to_fourcc(pixel_format,
                                      val & PLANE_CTL_ORDER_RGBX, alpha);
        fb->format = drm_format_info(fourcc);

        /* Translate the hw tiling bits into a DRM framebuffer modifier. */
        tiling = val & PLANE_CTL_TILED_MASK;
        switch (tiling) {
        case PLANE_CTL_TILED_LINEAR:
                fb->modifier = DRM_FORMAT_MOD_LINEAR;
                break;
        case PLANE_CTL_TILED_X:
                plane_config->tiling = I915_TILING_X;
                fb->modifier = I915_FORMAT_MOD_X_TILED;
                break;
        case PLANE_CTL_TILED_Y:
                plane_config->tiling = I915_TILING_Y;
                if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
                        fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
                else
                        fb->modifier = I915_FORMAT_MOD_Y_TILED;
                break;
        case PLANE_CTL_TILED_YF:
                if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
                        fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
                else
                        fb->modifier = I915_FORMAT_MOD_Yf_TILED;
                break;
        default:
                MISSING_CASE(tiling);
                goto error;
        }

        /*
         * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
         * while i915 HW rotation is clockwise, thats why this swapping.
         */
        switch (val & PLANE_CTL_ROTATE_MASK) {
        case PLANE_CTL_ROTATE_0:
                plane_config->rotation = DRM_MODE_ROTATE_0;
                break;
        case PLANE_CTL_ROTATE_90:
                plane_config->rotation = DRM_MODE_ROTATE_270;
                break;
        case PLANE_CTL_ROTATE_180:
                plane_config->rotation = DRM_MODE_ROTATE_180;
                break;
        case PLANE_CTL_ROTATE_270:
                plane_config->rotation = DRM_MODE_ROTATE_90;
                break;
        }

        if (INTEL_GEN(dev_priv) >= 10 &&
            val & PLANE_CTL_FLIP_HORIZONTAL)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        /* Surface base address; low 12 bits of PLANE_SURF are not address. */
        base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
        plane_config->base = base;

        offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

        /* PLANE_SIZE holds (height - 1) << 16 | (width - 1). */
        val = I915_READ(PLANE_SIZE(pipe, plane_id));
        fb->height = ((val >> 16) & 0xffff) + 1;
        fb->width = ((val >> 0) & 0xffff) + 1;

        /* The stride register counts units of stride_mult bytes. */
        val = I915_READ(PLANE_STRIDE(pipe, plane_id));
        stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
        fb->pitches[0] = (val & 0x3ff) * stride_mult;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      crtc->base.name, plane->base.name, fb->width, fb->height,
                      fb->format->cpp[0] * 8, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
        return;

error:
        kfree(intel_fb);
}
10089
10090 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
10091                                      struct intel_crtc_state *pipe_config)
10092 {
10093         struct drm_device *dev = crtc->base.dev;
10094         struct drm_i915_private *dev_priv = to_i915(dev);
10095         u32 tmp;
10096
10097         tmp = I915_READ(PF_CTL(crtc->pipe));
10098
10099         if (tmp & PF_ENABLE) {
10100                 pipe_config->pch_pfit.enabled = true;
10101                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
10102                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
10103
10104                 /* We currently do not free assignements of panel fitters on
10105                  * ivb/hsw (since we don't use the higher upscaling modes which
10106                  * differentiates them) so just WARN about this case for now. */
10107                 if (IS_GEN(dev_priv, 7)) {
10108                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
10109                                 PF_PIPE_SEL_IVB(crtc->pipe));
10110                 }
10111         }
10112 }
10113
10114 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
10115                                      struct intel_crtc_state *pipe_config)
10116 {
10117         struct drm_device *dev = crtc->base.dev;
10118         struct drm_i915_private *dev_priv = to_i915(dev);
10119         enum intel_display_power_domain power_domain;
10120         intel_wakeref_t wakeref;
10121         u32 tmp;
10122         bool ret;
10123
10124         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10125         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10126         if (!wakeref)
10127                 return false;
10128
10129         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10130         pipe_config->shared_dpll = NULL;
10131         pipe_config->master_transcoder = INVALID_TRANSCODER;
10132
10133         ret = false;
10134         tmp = I915_READ(PIPECONF(crtc->pipe));
10135         if (!(tmp & PIPECONF_ENABLE))
10136                 goto out;
10137
10138         switch (tmp & PIPECONF_BPC_MASK) {
10139         case PIPECONF_6BPC:
10140                 pipe_config->pipe_bpp = 18;
10141                 break;
10142         case PIPECONF_8BPC:
10143                 pipe_config->pipe_bpp = 24;
10144                 break;
10145         case PIPECONF_10BPC:
10146                 pipe_config->pipe_bpp = 30;
10147                 break;
10148         case PIPECONF_12BPC:
10149                 pipe_config->pipe_bpp = 36;
10150                 break;
10151         default:
10152                 break;
10153         }
10154
10155         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10156                 pipe_config->limited_color_range = true;
10157
10158         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10159         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10160         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10161                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10162                 break;
10163         default:
10164                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10165                 break;
10166         }
10167
10168         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10169                 PIPECONF_GAMMA_MODE_SHIFT;
10170
10171         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10172
10173         i9xx_get_pipe_color_config(pipe_config);
10174         intel_color_get_config(pipe_config);
10175
10176         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10177                 struct intel_shared_dpll *pll;
10178                 enum intel_dpll_id pll_id;
10179
10180                 pipe_config->has_pch_encoder = true;
10181
10182                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10183                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10184                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10185
10186                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10187
10188                 if (HAS_PCH_IBX(dev_priv)) {
10189                         /*
10190                          * The pipe->pch transcoder and pch transcoder->pll
10191                          * mapping is fixed.
10192                          */
10193                         pll_id = (enum intel_dpll_id) crtc->pipe;
10194                 } else {
10195                         tmp = I915_READ(PCH_DPLL_SEL);
10196                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10197                                 pll_id = DPLL_ID_PCH_PLL_B;
10198                         else
10199                                 pll_id= DPLL_ID_PCH_PLL_A;
10200                 }
10201
10202                 pipe_config->shared_dpll =
10203                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10204                 pll = pipe_config->shared_dpll;
10205
10206                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10207                                                 &pipe_config->dpll_hw_state));
10208
10209                 tmp = pipe_config->dpll_hw_state.dpll;
10210                 pipe_config->pixel_multiplier =
10211                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10212                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10213
10214                 ironlake_pch_clock_get(crtc, pipe_config);
10215         } else {
10216                 pipe_config->pixel_multiplier = 1;
10217         }
10218
10219         intel_get_pipe_timings(crtc, pipe_config);
10220         intel_get_pipe_src_size(crtc, pipe_config);
10221
10222         ironlake_get_pfit_config(crtc, pipe_config);
10223
10224         ret = true;
10225
10226 out:
10227         intel_display_power_put(dev_priv, power_domain, wakeref);
10228
10229         return ret;
10230 }
10231 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10232                                       struct intel_crtc_state *crtc_state)
10233 {
10234         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10235         struct intel_atomic_state *state =
10236                 to_intel_atomic_state(crtc_state->uapi.state);
10237
10238         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10239             INTEL_GEN(dev_priv) >= 11) {
10240                 struct intel_encoder *encoder =
10241                         intel_get_crtc_new_encoder(state, crtc_state);
10242
10243                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10244                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10245                                       pipe_name(crtc->pipe));
10246                         return -EINVAL;
10247                 }
10248         }
10249
10250         return 0;
10251 }
10252
10253 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
10254                                    enum port port,
10255                                    struct intel_crtc_state *pipe_config)
10256 {
10257         enum intel_dpll_id id;
10258         u32 temp;
10259
10260         temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10261         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10262
10263         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10264                 return;
10265
10266         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10267 }
10268
/*
 * Read out which DPLL drives @port on gen11+, record it in the
 * matching per-port DPLL slot of @pipe_config, and mark that slot as
 * the active one.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum phy phy = intel_port_to_phy(dev_priv, port);
        enum icl_port_dpll_id port_dpll_id;
        enum intel_dpll_id id;
        u32 temp;

        if (intel_phy_is_combo(dev_priv, phy)) {
                /* Combo PHY: PLL selection lives in DPCLKA_CFGCR0. */
                temp = I915_READ(ICL_DPCLKA_CFGCR0) &
                        ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
                id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
                port_dpll_id = ICL_PORT_DPLL_DEFAULT;
        } else if (intel_phy_is_tc(dev_priv, phy)) {
                u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

                if (clk_sel == DDI_CLK_SEL_MG) {
                        /* Type-C port clocked by its MG PHY PLL. */
                        id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
                                                                    port));
                        port_dpll_id = ICL_PORT_DPLL_MG_PHY;
                } else {
                        /* Anything else must be one of the TBT clock selects. */
                        WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
                        id = DPLL_ID_ICL_TBTPLL;
                        port_dpll_id = ICL_PORT_DPLL_DEFAULT;
                }
        } else {
                WARN(1, "Invalid port %x\n", port);
                return;
        }

        pipe_config->icl_port_dplls[port_dpll_id].pll =
                intel_get_shared_dpll_by_id(dev_priv, id);

        icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10305
10306 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10307                                 enum port port,
10308                                 struct intel_crtc_state *pipe_config)
10309 {
10310         enum intel_dpll_id id;
10311
10312         switch (port) {
10313         case PORT_A:
10314                 id = DPLL_ID_SKL_DPLL0;
10315                 break;
10316         case PORT_B:
10317                 id = DPLL_ID_SKL_DPLL1;
10318                 break;
10319         case PORT_C:
10320                 id = DPLL_ID_SKL_DPLL2;
10321                 break;
10322         default:
10323                 DRM_ERROR("Incorrect port type\n");
10324                 return;
10325         }
10326
10327         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10328 }
10329
/*
 * Read out which shared DPLL (DPLL0-3) DPLL_CTRL2 has selected for
 * @port on SKL/KBL and store it in @pipe_config->shared_dpll.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 temp;

        temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
        /* Each port's clock select field starts at bit (port * 3 + 1). */
        id = temp >> (port * 3 + 1);

        if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
                return;

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10345
/*
 * Translate PORT_CLK_SEL for @port into a shared DPLL id on HSW/BDW
 * and store the PLL in @pipe_config->shared_dpll.  Leaves the state
 * untouched when no clock is selected (PORT_CLK_SEL_NONE) or the
 * selection is unrecognized.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

        switch (ddi_pll_sel) {
        case PORT_CLK_SEL_WRPLL1:
                id = DPLL_ID_WRPLL1;
                break;
        case PORT_CLK_SEL_WRPLL2:
                id = DPLL_ID_WRPLL2;
                break;
        case PORT_CLK_SEL_SPLL:
                id = DPLL_ID_SPLL;
                break;
        case PORT_CLK_SEL_LCPLL_810:
                id = DPLL_ID_LCPLL_810;
                break;
        case PORT_CLK_SEL_LCPLL_1350:
                id = DPLL_ID_LCPLL_1350;
                break;
        case PORT_CLK_SEL_LCPLL_2700:
                id = DPLL_ID_LCPLL_2700;
                break;
        default:
                MISSING_CASE(ddi_pll_sel);
                /* fall through */
        case PORT_CLK_SEL_NONE:
                return;
        }

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10381
/*
 * Figure out which CPU transcoder is driving @crtc (accounting for the
 * eDP and DSI panel transcoders, whose pipe mapping is configurable)
 * and whether the pipe is enabled.
 *
 * On success the transcoder's power domain reference is left held: its
 * wakeref is recorded in @wakerefs and the domain bit is set in
 * @power_domain_mask, so the caller is responsible for releasing it.
 * Returns false if the transcoder's power well is disabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     u64 *power_domain_mask,
                                     intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        unsigned long panel_transcoder_mask = 0;
        unsigned long enabled_panel_transcoders = 0;
        enum transcoder panel_transcoder;
        intel_wakeref_t wf;
        u32 tmp;

        if (INTEL_GEN(dev_priv) >= 11)
                panel_transcoder_mask |=
                        BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

        if (HAS_TRANSCODER_EDP(dev_priv))
                panel_transcoder_mask |= BIT(TRANSCODER_EDP);

        /*
         * The pipe->transcoder mapping is fixed with the exception of the eDP
         * and DSI transcoders handled below.
         */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

        /*
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
        for_each_set_bit(panel_transcoder,
                         &panel_transcoder_mask,
                         ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
                bool force_thru = false;
                enum pipe trans_pipe;

                tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
                if (!(tmp & TRANS_DDI_FUNC_ENABLE))
                        continue;

                /*
                 * Log all enabled ones, only use the first one.
                 *
                 * FIXME: This won't work for two separate DSI displays.
                 */
                enabled_panel_transcoders |= BIT(panel_transcoder);
                if (enabled_panel_transcoders != BIT(panel_transcoder))
                        continue;

                /* Decode which pipe the panel transcoder is routed to. */
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
                        WARN(1, "unknown pipe linked to transcoder %s\n",
                             transcoder_name(panel_transcoder));
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                        force_thru = true;
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_pipe = PIPE_A;
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
                        trans_pipe = PIPE_B;
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
                        trans_pipe = PIPE_C;
                        break;
                }

                if (trans_pipe == crtc->pipe) {
                        pipe_config->cpu_transcoder = panel_transcoder;
                        pipe_config->pch_pfit.force_thru = force_thru;
                }
        }

        /*
         * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
         */
        WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
                enabled_panel_transcoders != BIT(TRANSCODER_EDP));

        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        /* Keep the reference; the caller releases it via the mask. */
        wakerefs[power_domain] = wf;
        *power_domain_mask |= BIT_ULL(power_domain);

        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

        return tmp & PIPECONF_ENABLE;
}
10477
/*
 * Check whether one of the BXT DSI transcoders (behind ports A/C) is
 * driving @crtc.  If so, pipe_config->cpu_transcoder is set to that
 * DSI transcoder and true is returned.  Any transcoder power domain
 * reference taken along the way is kept (recorded in @wakerefs and
 * @power_domain_mask) for the caller to release.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         u64 *power_domain_mask,
                                         intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum transcoder cpu_transcoder;
        intel_wakeref_t wf;
        enum port port;
        u32 tmp;

        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

                wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
                if (!wf)
                        continue;

                wakerefs[power_domain] = wf;
                *power_domain_mask |= BIT_ULL(power_domain);

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                /* Skip DSI ports routed to a different pipe. */
                tmp = I915_READ(MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10532
/*
 * Determine which DDI port feeds @crtc's transcoder, read out the
 * PLL driving that port (per-platform), and detect an active PCH/FDI
 * encoder on HSW/BDW.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
                                       struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_shared_dpll *pll;
        enum port port;
        u32 tmp;

        if (transcoder_is_dsi(cpu_transcoder)) {
                /* DSI transcoders have a fixed port mapping. */
                port = (cpu_transcoder == TRANSCODER_DSI_A) ?
                                                PORT_A : PORT_B;
        } else {
                tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                if (INTEL_GEN(dev_priv) >= 12)
                        port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
                else
                        port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
        }

        /* PLL readout is platform specific. */
        if (INTEL_GEN(dev_priv) >= 11)
                icelake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_BC(dev_priv))
                skylake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_LP(dev_priv))
                bxt_get_ddi_pll(dev_priv, port, pipe_config);
        else
                haswell_get_ddi_pll(dev_priv, port, pipe_config);

        pll = pipe_config->shared_dpll;
        if (pll) {
                WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
                                                &pipe_config->dpll_hw_state));
        }

        /*
         * Haswell has only FDI/PCH transcoder A. It is connected to
         * DDI E. So just check whether this pipe is wired to DDI E and whether
         * the PCH transcoder is on.
         */
        if (INTEL_GEN(dev_priv) < 9 &&
            (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;

                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;

                ironlake_get_fdi_m_n_config(crtc, pipe_config);
        }
}
10586
10587 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
10588                                                  enum transcoder cpu_transcoder)
10589 {
10590         u32 trans_port_sync, master_select;
10591
10592         trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
10593
10594         if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
10595                 return INVALID_TRANSCODER;
10596
10597         master_select = trans_port_sync &
10598                         PORT_SYNC_MODE_MASTER_SELECT_MASK;
10599         if (master_select == 0)
10600                 return TRANSCODER_EDP;
10601         else
10602                 return master_select - 1;
10603 }
10604
/*
 * Read out the transcoder port sync configuration for @crtc_state:
 * which transcoder (if any) is our master, and which transcoders are
 * slaved to us.  A transcoder cannot be both a master and a slave,
 * which the final WARN_ON checks.
 */
static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        u32 transcoders;
        enum transcoder cpu_transcoder;

        crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
                                                                  crtc_state->cpu_transcoder);

        /* Scan all regular transcoders for slaves pointing back at us. */
        transcoders = BIT(TRANSCODER_A) |
                BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) |
                BIT(TRANSCODER_D);
        for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t trans_wakeref;

                /* Skip powered-down transcoders; reading them would hang. */
                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                                   power_domain);

                if (!trans_wakeref)
                        continue;

                if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
                    crtc_state->cpu_transcoder)
                        crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

                intel_display_power_put(dev_priv, power_domain, trans_wakeref);
        }

        WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
                crtc_state->sync_mode_slaves_mask);
}
10639
/*
 * Read out the full hw state of @crtc into @pipe_config on HSW+.
 * Returns true iff the pipe is active.  Power domain references are
 * taken per-domain as needed (pipe, transcoder, panel fitter),
 * tracked in @wakerefs/@power_domain_mask, and all released before
 * returning.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
        enum intel_display_power_domain power_domain;
        u64 power_domain_mask;
        bool active;

        intel_crtc_init_scalers(crtc, pipe_config);

        pipe_config->master_transcoder = INVALID_TRANSCODER;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        wakerefs[power_domain] = wf;
        power_domain_mask = BIT_ULL(power_domain);

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config,
                                          &power_domain_mask, wakerefs);

        /* BXT DSI bypasses the DDI transcoders; check it separately. */
        if (IS_GEN9_LP(dev_priv) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config,
                                         &power_domain_mask, wakerefs)) {
                WARN_ON(active);
                active = true;
        }

        if (!active)
                goto out;

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            INTEL_GEN(dev_priv) >= 11) {
                haswell_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }

        intel_get_pipe_src_size(crtc, pipe_config);

        if (IS_HASWELL(dev_priv)) {
                u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

                if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                else
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        } else {
                pipe_config->output_format =
                        bdw_get_pipemisc_output_format(crtc);

                /*
                 * Currently there is no interface defined to
                 * check user preference between RGB/YCBCR444
                 * or YCBCR420. So the only possible case for
                 * YCBCR444 usage is driving YCBCR420 output
                 * with LSPCON, when pipe is configured for
                 * YCBCR444 output and LSPCON takes care of
                 * downsampling it.
                 */
                pipe_config->lspcon_downsampling =
                        pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
        }

        pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

        pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

        if (INTEL_GEN(dev_priv) >= 9) {
                u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
                        pipe_config->gamma_enable = true;

                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
                        pipe_config->csc_enable = true;
        } else {
                i9xx_get_pipe_color_config(pipe_config);
        }

        intel_color_get_config(pipe_config);

        /* The pfit has its own power well; only read it if powered. */
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        WARN_ON(power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wf) {
                wakerefs[power_domain] = wf;
                power_domain_mask |= BIT_ULL(power_domain);

                if (INTEL_GEN(dev_priv) >= 9)
                        skylake_get_pfit_config(crtc, pipe_config);
                else
                        ironlake_get_pfit_config(crtc, pipe_config);
        }

        if (hsw_crtc_supports_ips(crtc)) {
                if (IS_HASWELL(dev_priv))
                        pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
                else {
                        /*
                         * We cannot readout IPS state on broadwell, set to
                         * true so we can set it to a defined state on first
                         * commit.
                         */
                        pipe_config->ips_enabled = true;
                }
        }

        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

        if (INTEL_GEN(dev_priv) >= 11 &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder))
                icelake_get_trans_port_sync_config(pipe_config);

out:
        /* Drop every power reference taken during readout. */
        for_each_power_domain(power_domain, power_domain_mask)
                intel_display_power_put(dev_priv,
                                        power_domain, wakerefs[power_domain]);

        return active;
}
10772
10773 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10774 {
10775         struct drm_i915_private *dev_priv =
10776                 to_i915(plane_state->uapi.plane->dev);
10777         const struct drm_framebuffer *fb = plane_state->hw.fb;
10778         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10779         u32 base;
10780
10781         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10782                 base = obj->phys_handle->busaddr;
10783         else
10784                 base = intel_plane_ggtt_offset(plane_state);
10785
10786         return base + plane_state->color_plane[0].offset;
10787 }
10788
10789 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10790 {
10791         int x = plane_state->uapi.dst.x1;
10792         int y = plane_state->uapi.dst.y1;
10793         u32 pos = 0;
10794
10795         if (x < 0) {
10796                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10797                 x = -x;
10798         }
10799         pos |= x << CURSOR_X_SHIFT;
10800
10801         if (y < 0) {
10802                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10803                 y = -y;
10804         }
10805         pos |= y << CURSOR_Y_SHIFT;
10806
10807         return pos;
10808 }
10809
10810 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10811 {
10812         const struct drm_mode_config *config =
10813                 &plane_state->uapi.plane->dev->mode_config;
10814         int width = drm_rect_width(&plane_state->uapi.dst);
10815         int height = drm_rect_height(&plane_state->uapi.dst);
10816
10817         return width > 0 && width <= config->cursor_width &&
10818                 height > 0 && height <= config->cursor_height;
10819 }
10820
/*
 * Pin/compute the cursor surface and fill in color_plane[0] offset/x/y.
 * Returns 0 on success or a negative error code. Cursors cannot pan
 * within the surface, so any non-zero residual x/y after offset
 * alignment is rejected.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	/* Bind the fb into the GGTT (or physical memory) first. */
	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing more to compute for an invisible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	/* src coordinates are 16.16 fixed point. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	/* Any residual x/y would mean panning inside the fb, unsupported. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Point at the last pixel so 180° rotation scans backwards. */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
10871
10872 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10873                               struct intel_plane_state *plane_state)
10874 {
10875         const struct drm_framebuffer *fb = plane_state->hw.fb;
10876         int ret;
10877
10878         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10879                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10880                 return -EINVAL;
10881         }
10882
10883         ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
10884                                                   &crtc_state->uapi,
10885                                                   DRM_PLANE_HELPER_NO_SCALING,
10886                                                   DRM_PLANE_HELPER_NO_SCALING,
10887                                                   true, true);
10888         if (ret)
10889                 return ret;
10890
10891         /* Use the unclipped src/dst rectangles, which we program to hw */
10892         plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
10893         plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
10894
10895         ret = intel_cursor_check_surface(plane_state);
10896         if (ret)
10897                 return ret;
10898
10899         if (!plane_state->uapi.visible)
10900                 return 0;
10901
10902         ret = intel_plane_check_src_coordinates(plane_state);
10903         if (ret)
10904                 return ret;
10905
10906         return 0;
10907 }
10908
/* 845g/865g: maximum cursor stride is fixed at 2048 bytes. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
10916
10917 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10918 {
10919         u32 cntl = 0;
10920
10921         if (crtc_state->gamma_enable)
10922                 cntl |= CURSOR_GAMMA_ENABLE;
10923
10924         return cntl;
10925 }
10926
10927 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10928                            const struct intel_plane_state *plane_state)
10929 {
10930         return CURSOR_ENABLE |
10931                 CURSOR_FORMAT_ARGB |
10932                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10933 }
10934
10935 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10936 {
10937         int width = drm_rect_width(&plane_state->uapi.dst);
10938
10939         /*
10940          * 845g/865g are only limited by the width of their cursors,
10941          * the height is arbitrary up to the precision of the register.
10942          */
10943         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10944 }
10945
10946 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10947                              struct intel_plane_state *plane_state)
10948 {
10949         const struct drm_framebuffer *fb = plane_state->hw.fb;
10950         int ret;
10951
10952         ret = intel_check_cursor(crtc_state, plane_state);
10953         if (ret)
10954                 return ret;
10955
10956         /* if we want to turn off the cursor ignore width and height */
10957         if (!fb)
10958                 return 0;
10959
10960         /* Check for which cursor types we support */
10961         if (!i845_cursor_size_ok(plane_state)) {
10962                 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10963                           drm_rect_width(&plane_state->uapi.dst),
10964                           drm_rect_height(&plane_state->uapi.dst));
10965                 return -EINVAL;
10966         }
10967
10968         WARN_ON(plane_state->uapi.visible &&
10969                 plane_state->color_plane[0].stride != fb->pitches[0]);
10970
10971         switch (fb->pitches[0]) {
10972         case 256:
10973         case 512:
10974         case 1024:
10975         case 2048:
10976                 break;
10977         default:
10978                 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10979                               fb->pitches[0]);
10980                 return -EINVAL;
10981         }
10982
10983         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10984
10985         return 0;
10986 }
10987
/*
 * Program the 845g/865g cursor registers. Called with a NULL
 * plane_state to disable the cursor (all registers written as 0).
 * Register writes are done under the uncore lock with the _FW
 * accessors; the write order below is required by the hardware.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		/* CURSIZE packs height in bits 12+, width in the low bits. */
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, then reprogram everything and re-enable. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache what we wrote so the fast path below can be used. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; a single write suffices. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11032
/* Disable the cursor by programming it with a NULL plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
11038
11039 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11040                                      enum pipe *pipe)
11041 {
11042         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11043         enum intel_display_power_domain power_domain;
11044         intel_wakeref_t wakeref;
11045         bool ret;
11046
11047         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11048         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11049         if (!wakeref)
11050                 return false;
11051
11052         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11053
11054         *pipe = PIPE_A;
11055
11056         intel_display_power_put(dev_priv, power_domain, wakeref);
11057
11058         return ret;
11059 }
11060
/* i9xx+: max stride is the max cursor width times 4 bytes/pixel (ARGB). */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
11068
11069 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11070 {
11071         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11072         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11073         u32 cntl = 0;
11074
11075         if (INTEL_GEN(dev_priv) >= 11)
11076                 return cntl;
11077
11078         if (crtc_state->gamma_enable)
11079                 cntl = MCURSOR_GAMMA_ENABLE;
11080
11081         if (crtc_state->csc_enable)
11082                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11083
11084         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11085                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11086
11087         return cntl;
11088 }
11089
11090 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11091                            const struct intel_plane_state *plane_state)
11092 {
11093         struct drm_i915_private *dev_priv =
11094                 to_i915(plane_state->uapi.plane->dev);
11095         u32 cntl = 0;
11096
11097         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11098                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11099
11100         switch (drm_rect_width(&plane_state->uapi.dst)) {
11101         case 64:
11102                 cntl |= MCURSOR_MODE_64_ARGB_AX;
11103                 break;
11104         case 128:
11105                 cntl |= MCURSOR_MODE_128_ARGB_AX;
11106                 break;
11107         case 256:
11108                 cntl |= MCURSOR_MODE_256_ARGB_AX;
11109                 break;
11110         default:
11111                 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11112                 return 0;
11113         }
11114
11115         if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11116                 cntl |= MCURSOR_ROTATE_180;
11117
11118         return cntl;
11119 }
11120
11121 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11122 {
11123         struct drm_i915_private *dev_priv =
11124                 to_i915(plane_state->uapi.plane->dev);
11125         int width = drm_rect_width(&plane_state->uapi.dst);
11126         int height = drm_rect_height(&plane_state->uapi.dst);
11127
11128         if (!intel_cursor_size_ok(plane_state))
11129                 return false;
11130
11131         /* Cursor width is limited to a few power-of-two sizes */
11132         switch (width) {
11133         case 256:
11134         case 128:
11135         case 64:
11136                 break;
11137         default:
11138                 return false;
11139         }
11140
11141         /*
11142          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11143          * height from 8 lines up to the cursor width, when the
11144          * cursor is not rotated. Everything else requires square
11145          * cursors.
11146          */
11147         if (HAS_CUR_FBC(dev_priv) &&
11148             plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11149                 if (height < 8 || height > width)
11150                         return false;
11151         } else {
11152                 if (height != width)
11153                         return false;
11154         }
11155
11156         return true;
11157 }
11158
/*
 * atomic_check for the i9xx+ cursor plane: common checks, then
 * platform size/stride validation and the CHV pipe C workaround.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  drm_rect_width(&plane_state->uapi.dst),
			  drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The stride must exactly match the visible width (4 bytes/px). */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0],
			      drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
11215
/*
 * Program the i9xx+ cursor registers. Called with a NULL plane_state
 * to disable the cursor (all registers written as 0). The register
 * write order below is dictated by how the hardware arms updates.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* CUR_FBC_CTL only matters for non-square cursors. */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* Cache the programmed values for the fast path below. */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; CURBASE still arms the update. */
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11283
/* Disable the cursor by programming it with a NULL plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
11289
11290 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11291                                      enum pipe *pipe)
11292 {
11293         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11294         enum intel_display_power_domain power_domain;
11295         intel_wakeref_t wakeref;
11296         bool ret;
11297         u32 val;
11298
11299         /*
11300          * Not 100% correct for planes that can move between pipes,
11301          * but that's only the case for gen2-3 which don't have any
11302          * display power wells.
11303          */
11304         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11305         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11306         if (!wakeref)
11307                 return false;
11308
11309         val = I915_READ(CURCNTR(plane->pipe));
11310
11311         ret = val & MCURSOR_MODE;
11312
11313         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11314                 *pipe = plane->pipe;
11315         else
11316                 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11317                         MCURSOR_PIPE_SELECT_SHIFT;
11318
11319         intel_display_power_put(dev_priv, power_domain, wakeref);
11320
11321         return ret;
11322 }
11323
/*
 * VESA 640x480x72Hz mode to set on the pipe: the fixed mode programmed
 * by intel_get_load_detect_pipe() while performing load detection.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11329
11330 struct drm_framebuffer *
11331 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11332                          struct drm_mode_fb_cmd2 *mode_cmd)
11333 {
11334         struct intel_framebuffer *intel_fb;
11335         int ret;
11336
11337         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11338         if (!intel_fb)
11339                 return ERR_PTR(-ENOMEM);
11340
11341         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11342         if (ret)
11343                 goto err;
11344
11345         return &intel_fb->base;
11346
11347 err:
11348         kfree(intel_fb);
11349         return ERR_PTR(ret);
11350 }
11351
/*
 * Add every plane on @crtc to @state and detach it (no crtc, no fb),
 * so that committing @state scans out nothing on that crtc.
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		/* Only touch planes currently assigned to this crtc. */
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
11376
/*
 * Light up a pipe driving @connector with a fixed mode so analog load
 * detection can be performed. On success, a "restore" atomic state is
 * stashed in @old for intel_release_load_detect_pipe() to commit later.
 *
 * NOTE(review): the function is declared int but returns true/false on
 * the normal paths and -EDEADLK when the acquire context must back off
 * — callers appear to treat nonzero as success except for the -EDEADLK
 * case; confirm against callers before changing the convention.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Crtc already in use; drop the lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to commit the detect mode, one to restore afterwards. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Scan out nothing but the load-detect pattern. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must be propagated so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
11531
/*
 * Undo intel_get_load_detect_pipe(): commit the saved restore state
 * (if any) to put the pipe/connector back the way they were, then
 * release the saved state. Safe to call when no state was saved.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	/* Nothing was saved, so nothing to restore. */
	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
11554
11555 static int i9xx_pll_refclk(struct drm_device *dev,
11556                            const struct intel_crtc_state *pipe_config)
11557 {
11558         struct drm_i915_private *dev_priv = to_i915(dev);
11559         u32 dpll = pipe_config->dpll_hw_state.dpll;
11560
11561         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11562                 return dev_priv->vbt.lvds_ssc_freq;
11563         else if (HAS_PCH_SPLIT(dev_priv))
11564                 return 120000;
11565         else if (!IS_GEN(dev_priv, 2))
11566                 return 96000;
11567         else
11568                 return 48000;
11569 }
11570
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values stashed in
 * pipe_config->dpll_hw_state back into m/n/p divider values and
 * computes the resulting port clock, which is stored in
 * pipe_config->port_clock (pixel_multiplier included).
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* The hardware has two FP divisor registers; pick the active one. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Pineview encodes N and M2 with different masks/encodings. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored one-hot; ffs() recovers the divider value. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: I830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* Dual-channel LVDS (clock B powered) implies p2 == 7. */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
11660
11661 int intel_dotclock_calculate(int link_freq,
11662                              const struct intel_link_m_n *m_n)
11663 {
11664         /*
11665          * The calculation for the data clock is:
11666          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11667          * But we want to avoid losing precison if possible, so:
11668          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11669          *
11670          * and the link clock is simpler:
11671          * link_clock = (m * link_clock) / n
11672          */
11673
11674         if (!m_n->link_n)
11675                 return 0;
11676
11677         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11678 }
11679
/*
 * ironlake_pch_clock_get - read out clock state for a PCH-driven pipe
 * @crtc: the crtc to read out
 * @pipe_config: state to fill in
 *
 * Reads port_clock from the DPLL registers and derives a dotclock
 * estimate from the FDI M/N values so that an active pipe without
 * active ports still gets a plausible crtc_clock.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
11697
11698 /* Returns the currently programmed mode of the given encoder. */
11699 struct drm_display_mode *
11700 intel_encoder_current_mode(struct intel_encoder *encoder)
11701 {
11702         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11703         struct intel_crtc_state *crtc_state;
11704         struct drm_display_mode *mode;
11705         struct intel_crtc *crtc;
11706         enum pipe pipe;
11707
11708         if (!encoder->get_hw_state(encoder, &pipe))
11709                 return NULL;
11710
11711         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11712
11713         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11714         if (!mode)
11715                 return NULL;
11716
11717         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11718         if (!crtc_state) {
11719                 kfree(mode);
11720                 return NULL;
11721         }
11722
11723         crtc_state->uapi.crtc = &crtc->base;
11724
11725         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11726                 kfree(crtc_state);
11727                 kfree(mode);
11728                 return NULL;
11729         }
11730
11731         encoder->get_config(encoder, crtc_state);
11732
11733         intel_mode_from_pipe_config(mode, crtc_state);
11734
11735         kfree(crtc_state);
11736
11737         return mode;
11738 }
11739
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	/* Tear down the DRM core crtc state, then free our wrapper object. */
	drm_crtc_cleanup(crtc);
	kfree(to_intel_crtc(crtc));
}
11747
11748 /**
11749  * intel_wm_need_update - Check whether watermarks need updating
11750  * @cur: current plane state
11751  * @new: new plane state
11752  *
11753  * Check current plane state versus the new one to determine whether
11754  * watermarks need to be recalculated.
11755  *
11756  * Returns true or false.
11757  */
11758 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11759                                  struct intel_plane_state *new)
11760 {
11761         /* Update watermarks on tiling or size changes. */
11762         if (new->uapi.visible != cur->uapi.visible)
11763                 return true;
11764
11765         if (!cur->hw.fb || !new->hw.fb)
11766                 return false;
11767
11768         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11769             cur->hw.rotation != new->hw.rotation ||
11770             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11771             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11772             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11773             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
11774                 return true;
11775
11776         return false;
11777 }
11778
11779 static bool needs_scaling(const struct intel_plane_state *state)
11780 {
11781         int src_w = drm_rect_width(&state->uapi.src) >> 16;
11782         int src_h = drm_rect_height(&state->uapi.src) >> 16;
11783         int dst_w = drm_rect_width(&state->uapi.dst);
11784         int dst_h = drm_rect_height(&state->uapi.dst);
11785
11786         return (src_w != dst_w || src_h != dst_h);
11787 }
11788
/*
 * intel_plane_atomic_calc_changes - derive crtc state bits for a plane update
 * @old_crtc_state: crtc state before the update
 * @crtc_state: new crtc state being computed
 * @old_plane_state: plane state before the update
 * @plane_state: new plane state
 *
 * Works out the plane's visibility transition (turn on/off/stays) and
 * sets the derived flags in @crtc_state: watermark pre/post updates,
 * cxsr and LP watermark disables, frontbuffer bits and the active plane
 * bookkeeping.
 *
 * Returns 0 on success, or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* gen9+ non-cursor planes go through the skl scaler setup. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane cannot have been visible on an inactive crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->uapi.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
		crtc_state->min_cdclk[plane->id] = 0;
	}

	/* Invisible before and after: nothing to derive. */
	if (!was_visible && !visible)
		return 0;

	/* A full modeset implicitly turns the plane off and back on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 crtc->base.base.id, crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
11911
11912 static bool encoders_cloneable(const struct intel_encoder *a,
11913                                const struct intel_encoder *b)
11914 {
11915         /* masks could be asymmetric, so check both ways */
11916         return a == b || (a->cloneable & (1 << b->type) &&
11917                           b->cloneable & (1 << a->type));
11918 }
11919
11920 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11921                                          struct intel_crtc *crtc,
11922                                          struct intel_encoder *encoder)
11923 {
11924         struct intel_encoder *source_encoder;
11925         struct drm_connector *connector;
11926         struct drm_connector_state *connector_state;
11927         int i;
11928
11929         for_each_new_connector_in_state(state, connector, connector_state, i) {
11930                 if (connector_state->crtc != &crtc->base)
11931                         continue;
11932
11933                 source_encoder =
11934                         to_intel_encoder(connector_state->best_encoder);
11935                 if (!encoders_cloneable(encoder, source_encoder))
11936                         return false;
11937         }
11938
11939         return true;
11940 }
11941
11942 static int icl_add_linked_planes(struct intel_atomic_state *state)
11943 {
11944         struct intel_plane *plane, *linked;
11945         struct intel_plane_state *plane_state, *linked_plane_state;
11946         int i;
11947
11948         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11949                 linked = plane_state->planar_linked_plane;
11950
11951                 if (!linked)
11952                         continue;
11953
11954                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11955                 if (IS_ERR(linked_plane_state))
11956                         return PTR_ERR(linked_plane_state);
11957
11958                 WARN_ON(linked_plane_state->planar_linked_plane != plane);
11959                 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
11960         }
11961
11962         return 0;
11963 }
11964
/*
 * icl_check_nv12_planes - (re)assign Y planes to planar YUV (NV12) planes
 * @crtc_state: new crtc state
 *
 * On gen11+ each NV12 plane needs a second "slave" plane to scan out the
 * Y component. This first tears down all existing master/slave links for
 * planes in the state, then, for every plane marked in nv12_planes, finds
 * a free Y-capable plane on the same crtc and links it as the slave,
 * copying the relevant hardware parameters over.
 *
 * Returns 0 on success, -EINVAL when no free Y plane is available, or a
 * negative error code from acquiring plane state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* Done if no plane in this state needs a Y plane. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this crtc to act as slave. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->color_plane[0] = plane_state->color_plane[0];

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes additionally select the CUS slave in cus_ctl. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12054
12055 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12056 {
12057         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12058         struct intel_atomic_state *state =
12059                 to_intel_atomic_state(new_crtc_state->uapi.state);
12060         const struct intel_crtc_state *old_crtc_state =
12061                 intel_atomic_get_old_crtc_state(state, crtc);
12062
12063         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12064 }
12065
/*
 * icl_add_sync_mode_crtcs - set up genlock master/slave links for tiled displays
 * @crtc_state: crtc state of a (potential) slave crtc
 *
 * For gen11+ tiled displays: if this crtc drives a non-last tile of a
 * tile group, find the crtc driving the last tile (the master/genlock
 * crtc), record its transcoder in @crtc_state->master_transcoder, and
 * add this crtc's transcoder to the master's sync_mode_slaves_mask.
 *
 * Returns 0 on success (including when there is nothing to do), or a
 * negative error code from atomic state acquisition or a missing master.
 */
static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_connector *master_connector, *connector;
	struct drm_connector_state *connector_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc *master_crtc = NULL;
	struct drm_crtc_state *master_crtc_state;
	struct intel_crtc_state *master_pipe_config;
	int i, tile_group_id;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * In case of tiled displays there could be one or more slaves but there is
	 * only one master. Lets make the CRTC used by the connector corresponding
	 * to the last horizonal and last vertical tile a master/genlock CRTC.
	 * All the other CRTCs corresponding to other tiles of the same Tile group
	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;
		if (!connector->has_tile)
			continue;
		/* Mode doesn't cover the full tile: no sync mode needed. */
		if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
		    crtc_state->hw.mode.vdisplay != connector->tile_v_size)
			return 0;
		/* This connector drives the last tile: it IS the master. */
		if (connector->tile_h_loc == connector->num_h_tile - 1 &&
		    connector->tile_v_loc == connector->num_v_tile - 1)
			continue;
		crtc_state->sync_mode_slaves_mask = 0;
		tile_group_id = connector->tile_group->id;
		/* Search all connectors for the master (last-tile) one. */
		drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
		drm_for_each_connector_iter(master_connector, &conn_iter) {
			struct drm_connector_state *master_conn_state = NULL;

			if (!master_connector->has_tile)
				continue;
			if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
			    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
				continue;
			if (master_connector->tile_group->id != tile_group_id)
				continue;

			master_conn_state = drm_atomic_get_connector_state(&state->base,
									   master_connector);
			if (IS_ERR(master_conn_state)) {
				/* Must end the iter before any early return. */
				drm_connector_list_iter_end(&conn_iter);
				return PTR_ERR(master_conn_state);
			}
			if (master_conn_state->crtc) {
				master_crtc = master_conn_state->crtc;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!master_crtc) {
			DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
				      connector_state->crtc->base.id);
			return -EINVAL;
		}

		master_crtc_state = drm_atomic_get_crtc_state(&state->base,
							      master_crtc);
		if (IS_ERR(master_crtc_state))
			return PTR_ERR(master_crtc_state);

		/* Link this slave's transcoder into the master's mask. */
		master_pipe_config = to_intel_crtc_state(master_crtc_state);
		crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
		master_pipe_config->sync_mode_slaves_mask |=
			BIT(crtc_state->cpu_transcoder);
		DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
			      transcoder_name(crtc_state->master_transcoder),
			      crtc_state->uapi.crtc->base.id,
			      master_pipe_config->sync_mode_slaves_mask);
	}

	return 0;
}
12150
/*
 * intel_crtc_atomic_check - per-crtc atomic check
 * @state: the full atomic state
 * @crtc: crtc to check
 *
 * Validates and fills in the derived parts of the new crtc state:
 * clocks, color management, pipe watermarks (target and intermediate),
 * scalers and IPS.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* Pre-gen5 (except g4x): disabling the pipe needs a post wm update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* Compute clocks on a modeset; a dpll must not already be assigned. */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate wm without pipe wm makes no sense. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* gen9+: (re)configure the pipe scaler and distribute scalers. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe)
			ret = skl_update_scaler_crtc(crtc_state);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, crtc,
							 crtc_state);
	}

	if (HAS_IPS(dev_priv))
		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);

	return ret;
}
12224
/*
 * intel_modeset_update_connector_atomic_state - sync connector atomic state
 * @dev: drm device
 *
 * Makes each connector's atomic state (best_encoder, crtc) match the
 * legacy encoder pointers, fixing up the crtc reference counts held by
 * the connector states accordingly. Used after hardware state readout.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previously bound crtc. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Take a reference for the newly bound crtc. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
12249
12250 static int
12251 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12252                       struct intel_crtc_state *pipe_config)
12253 {
12254         struct drm_connector *connector = conn_state->connector;
12255         const struct drm_display_info *info = &connector->display_info;
12256         int bpp;
12257
12258         switch (conn_state->max_bpc) {
12259         case 6 ... 7:
12260                 bpp = 6 * 3;
12261                 break;
12262         case 8 ... 9:
12263                 bpp = 8 * 3;
12264                 break;
12265         case 10 ... 11:
12266                 bpp = 10 * 3;
12267                 break;
12268         case 12:
12269                 bpp = 12 * 3;
12270                 break;
12271         default:
12272                 return -EINVAL;
12273         }
12274
12275         if (bpp < pipe_config->pipe_bpp) {
12276                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12277                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12278                               connector->base.id, connector->name,
12279                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
12280                               pipe_config->pipe_bpp);
12281
12282                 pipe_config->pipe_bpp = bpp;
12283         }
12284
12285         return 0;
12286 }
12287
/*
 * Set the baseline (platform maximum) pipe bpp and then clamp it to
 * what every connector on this crtc can sink.
 *
 * Returns 0 on success or a negative error code propagated from
 * compute_sink_pipe_bpp().
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	/* Platform max: 30bpp on G4X/VLV/CHV, 36bpp on gen5+, 24bpp otherwise. */
	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		int ret;

		/* Only connectors being driven by this crtc matter. */
		if (connector_state->crtc != &crtc->base)
			continue;

		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
		if (ret)
			return ret;
	}

	return 0;
}
12322
/* Log the crtc_* (hardware) timings of @mode at KMS debug level. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
12334
/*
 * Log one set of data link M/N values, tagged with @id (callers pass
 * e.g. "fdi", "dp m_n", "dp m2_n2") and the lane count.
 * Note: @pipe_config is currently unused by the body.
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
12345
/*
 * Log an HDMI infoframe at KMS debug level.  Bail out early when KMS
 * debugging is disabled so hdmi_infoframe_log() is not called for
 * nothing.
 */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
12355
/* Stringify an INTEL_OUTPUT_* enumerator for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/*
 * Human-readable names for the bits of intel_crtc_state::output_types,
 * indexed by the INTEL_OUTPUT_* enum values.
 */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12374
12375 static void snprintf_output_types(char *buf, size_t len,
12376                                   unsigned int output_types)
12377 {
12378         char *str = buf;
12379         int i;
12380
12381         str[0] = '\0';
12382
12383         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12384                 int r;
12385
12386                 if ((output_types & BIT(i)) == 0)
12387                         continue;
12388
12389                 r = snprintf(str, len, "%s%s",
12390                              str != buf ? "," : "", output_type_str[i]);
12391                 if (r >= len)
12392                         break;
12393                 str += r;
12394                 len -= r;
12395
12396                 output_types &= ~BIT(i);
12397         }
12398
12399         WARN_ON_ONCE(output_types != 0);
12400 }
12401
12402 static const char * const output_format_str[] = {
12403         [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
12404         [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
12405         [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
12406         [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
12407 };
12408
12409 static const char *output_formats(enum intel_output_format format)
12410 {
12411         if (format >= ARRAY_SIZE(output_format_str))
12412                 format = INTEL_OUTPUT_FORMAT_INVALID;
12413         return output_format_str[format];
12414 }
12415
/*
 * Log a plane state at KMS debug level: fb id/size/format, visibility,
 * rotation, scaler assignment and (when visible) the src/dst rects.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* No fb attached: only the visibility is worth reporting. */
	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->uapi.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->uapi.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
			      DRM_RECT_ARG(&plane_state->uapi.dst));
}
12441
/*
 * Dump a crtc state and (optionally) the associated plane states to the
 * kernel log at KMS debug level.
 *
 * @pipe_config: crtc state to dump
 * @state: atomic state used to look up the plane states; may be NULL,
 *	in which case no plane state is dumped
 * @context: caller-supplied string printed with the first line to
 *	identify where the dump was made from
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->hw.enable), context);

	/* For a disabled crtc only the plane states are of interest. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->hw.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump each infoframe type that is flagged as enabled. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have the gmch panel fitter, the rest the pch one. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has the CGM color block; everything else uses the CSC one. */
	if (IS_CHERRYVIEW(dev_priv))
		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->cgm_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->csc_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);

dump_planes:
	if (!state)
		return;

	/* Only the planes on this crtc's pipe are dumped. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
12552
/*
 * Check that no digital port is used by more than one encoder, and that
 * SST/HDMI and MST are not mixed on the same port.
 *
 * Returns true if the configuration is conflict free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state from @state, fall back to current. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
12624
/*
 * Copy the uapi state members that may change without a full modeset
 * into the hw state; currently that is only the color blobs.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
12630
/*
 * Sync the full hw crtc state (enable/active/modes plus the nomodeset
 * members) from the uapi (userspace-visible) state.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
12640
/*
 * Propagate the hw crtc state back into the uapi state.  The mode goes
 * through drm_atomic_set_mode_for_crtc() so the uapi mode blob stays in
 * sync; a failure there only triggers a WARN.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
12657
/*
 * Clear the crtc state ahead of a modeset recompute, while preserving
 * the members listed below (uapi state, scaler state, DPLL selection,
 * etc.), then re-sync the hw state from uapi.
 *
 * Returns 0 on success, -ENOMEM if the temporary copy cannot be
 * allocated.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc_state *saved_state;

	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* Watermarks are recomputed on these platforms, keep the old ones. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call.
	 */
	if (is_trans_port_sync_master(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	/* Overwrite everything with the (mostly zeroed) saved copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
12702
/*
 * Compute the full hw crtc state ("pipe config") for a modeset: sync
 * flags, baseline bpp, pipe source size, output types, then the encoder
 * and crtc .compute_config() hooks, retrying once if the crtc hook
 * returns RETRY.
 *
 * Returns 0 on success, -EDEADLK on modeset-lock contention (the caller
 * must back off and retry), or another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Set the crtc_state defaults for trans_port_sync */
	pipe_config->master_transcoder = INVALID_TRANSCODER;
	ret = icl_add_sync_mode_crtcs(pipe_config);
	if (ret) {
		DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
			      ret);
		return ret;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is expected backoff, don't log it. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* A single retry is allowed when the crtc asks for one. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
12850
/*
 * Fuzzily compare two clock values: they match when within roughly 5%
 * of each other.  A zero clock only ever matches another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	/* A zero clock can only match another zero clock exactly. */
	if (!clock1 || !clock2)
		return clock1 == clock2;

	if (clock1 == clock2)
		return true;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* Accept a relative difference below ~5%. */
	return (((delta + sum) * 100) / sum) < 105;
}
12868
/*
 * Compare two m/n ratios.  In exact mode, or when any value is zero,
 * only bit-identical ratios match.  Otherwise the ratio with the
 * smaller denominator is scaled up by powers of two until the
 * denominators can be compared, and the numerators are then matched
 * fuzzily via intel_fuzzy_clock_check().
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* The *100 in the fuzzy check must not overflow an int. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	/* Scale the ratio with the smaller denominator up towards the other. */
	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	/* Denominators never met exactly: the ratios are not comparable. */
	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}
12899
12900 static bool
12901 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12902                        const struct intel_link_m_n *m2_n2,
12903                        bool exact)
12904 {
12905         return m_n->tu == m2_n2->tu &&
12906                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12907                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12908                 intel_compare_m_n(m_n->link_m, m_n->link_n,
12909                                   m2_n2->link_m, m2_n2->link_n, exact);
12910 }
12911
12912 static bool
12913 intel_compare_infoframe(const union hdmi_infoframe *a,
12914                         const union hdmi_infoframe *b)
12915 {
12916         return memcmp(a, b, sizeof(*a)) == 0;
12917 }
12918
/*
 * Log an infoframe mismatch, dumping both the expected (@a) and found
 * (@b) frames.  During a fastset check this is only a KMS debug message
 * (skipped entirely when KMS debugging is off); otherwise it is logged
 * as a driver error.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		if ((drm_debug & DRM_UT_KMS) == 0)
			return;

		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
		DRM_DEBUG_KMS("expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		DRM_DEBUG_KMS("found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		DRM_ERROR("mismatch in %s infoframe\n", name);
		DRM_ERROR("expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		DRM_ERROR("found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
12942
/*
 * Log a pipe config field mismatch with a printf-style detail message:
 * at KMS debug level during a fastset check, at error level otherwise.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			      crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
			  crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
12963
12964 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12965 {
12966         if (i915_modparams.fastboot != -1)
12967                 return i915_modparams.fastboot;
12968
12969         /* Enable fastboot by default on Skylake and newer */
12970         if (INTEL_GEN(dev_priv) >= 9)
12971                 return true;
12972
12973         /* Enable fastboot by default on VLV and CHV */
12974         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12975                 return true;
12976
12977         /* Disabled by default on all others */
12978         return false;
12979 }
12980
12981 static bool
12982 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
12983                           const struct intel_crtc_state *pipe_config,
12984                           bool fastset)
12985 {
12986         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
12987         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
12988         bool ret = true;
12989         u32 bp_gamma = 0;
12990         bool fixup_inherited = fastset &&
12991                 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12992                 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
12993
12994         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12995                 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12996                 ret = false;
12997         }
12998
12999 #define PIPE_CONF_CHECK_X(name) do { \
13000         if (current_config->name != pipe_config->name) { \
13001                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13002                                      "(expected 0x%08x, found 0x%08x)", \
13003                                      current_config->name, \
13004                                      pipe_config->name); \
13005                 ret = false; \
13006         } \
13007 } while (0)
13008
13009 #define PIPE_CONF_CHECK_I(name) do { \
13010         if (current_config->name != pipe_config->name) { \
13011                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13012                                      "(expected %i, found %i)", \
13013                                      current_config->name, \
13014                                      pipe_config->name); \
13015                 ret = false; \
13016         } \
13017 } while (0)
13018
13019 #define PIPE_CONF_CHECK_BOOL(name) do { \
13020         if (current_config->name != pipe_config->name) { \
13021                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
13022                                      "(expected %s, found %s)", \
13023                                      yesno(current_config->name), \
13024                                      yesno(pipe_config->name)); \
13025                 ret = false; \
13026         } \
13027 } while (0)
13028
13029 /*
13030  * Checks state where we only read out the enabling, but not the entire
13031  * state itself (like full infoframes or ELD for audio). These states
13032  * require a full modeset on bootup to fix up.
13033  */
13034 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13035         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13036                 PIPE_CONF_CHECK_BOOL(name); \
13037         } else { \
13038                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13039                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13040                                      yesno(current_config->name), \
13041                                      yesno(pipe_config->name)); \
13042                 ret = false; \
13043         } \
13044 } while (0)
13045
13046 #define PIPE_CONF_CHECK_P(name) do { \
13047         if (current_config->name != pipe_config->name) { \
13048                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13049                                      "(expected %p, found %p)", \
13050                                      current_config->name, \
13051                                      pipe_config->name); \
13052                 ret = false; \
13053         } \
13054 } while (0)
13055
13056 #define PIPE_CONF_CHECK_M_N(name) do { \
13057         if (!intel_compare_link_m_n(&current_config->name, \
13058                                     &pipe_config->name,\
13059                                     !fastset)) { \
13060                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13061                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13062                                      "found tu %i, gmch %i/%i link %i/%i)", \
13063                                      current_config->name.tu, \
13064                                      current_config->name.gmch_m, \
13065                                      current_config->name.gmch_n, \
13066                                      current_config->name.link_m, \
13067                                      current_config->name.link_n, \
13068                                      pipe_config->name.tu, \
13069                                      pipe_config->name.gmch_m, \
13070                                      pipe_config->name.gmch_n, \
13071                                      pipe_config->name.link_m, \
13072                                      pipe_config->name.link_n); \
13073                 ret = false; \
13074         } \
13075 } while (0)
13076
13077 /* This is required for BDW+ where there is only one set of registers for
13078  * switching between high and low RR.
13079  * This macro can be used whenever a comparison has to be made between one
13080  * hw state and multiple sw state variables.
13081  */
13082 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13083         if (!intel_compare_link_m_n(&current_config->name, \
13084                                     &pipe_config->name, !fastset) && \
13085             !intel_compare_link_m_n(&current_config->alt_name, \
13086                                     &pipe_config->name, !fastset)) { \
13087                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13088                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13089                                      "or tu %i gmch %i/%i link %i/%i, " \
13090                                      "found tu %i, gmch %i/%i link %i/%i)", \
13091                                      current_config->name.tu, \
13092                                      current_config->name.gmch_m, \
13093                                      current_config->name.gmch_n, \
13094                                      current_config->name.link_m, \
13095                                      current_config->name.link_n, \
13096                                      current_config->alt_name.tu, \
13097                                      current_config->alt_name.gmch_m, \
13098                                      current_config->alt_name.gmch_n, \
13099                                      current_config->alt_name.link_m, \
13100                                      current_config->alt_name.link_n, \
13101                                      pipe_config->name.tu, \
13102                                      pipe_config->name.gmch_m, \
13103                                      pipe_config->name.gmch_n, \
13104                                      pipe_config->name.link_m, \
13105                                      pipe_config->name.link_n); \
13106                 ret = false; \
13107         } \
13108 } while (0)
13109
13110 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13111         if ((current_config->name ^ pipe_config->name) & (mask)) { \
13112                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13113                                      "(%x) (expected %i, found %i)", \
13114                                      (mask), \
13115                                      current_config->name & (mask), \
13116                                      pipe_config->name & (mask)); \
13117                 ret = false; \
13118         } \
13119 } while (0)
13120
13121 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13122         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13123                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13124                                      "(expected %i, found %i)", \
13125                                      current_config->name, \
13126                                      pipe_config->name); \
13127                 ret = false; \
13128         } \
13129 } while (0)
13130
13131 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13132         if (!intel_compare_infoframe(&current_config->infoframes.name, \
13133                                      &pipe_config->infoframes.name)) { \
13134                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13135                                                &current_config->infoframes.name, \
13136                                                &pipe_config->infoframes.name); \
13137                 ret = false; \
13138         } \
13139 } while (0)
13140
13141 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13142         if (current_config->name1 != pipe_config->name1) { \
13143                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13144                                 "(expected %i, found %i, won't compare lut values)", \
13145                                 current_config->name1, \
13146                                 pipe_config->name1); \
13147                 ret = false;\
13148         } else { \
13149                 if (!intel_color_lut_equal(current_config->name2, \
13150                                         pipe_config->name2, pipe_config->name1, \
13151                                         bit_precision)) { \
13152                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13153                                         "hw_state doesn't match sw_state"); \
13154                         ret = false; \
13155                 } \
13156         } \
13157 } while (0)
13158
13159 #define PIPE_CONF_QUIRK(quirk) \
13160         ((current_config->quirks | pipe_config->quirks) & (quirk))
13161
13162         PIPE_CONF_CHECK_I(cpu_transcoder);
13163
13164         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13165         PIPE_CONF_CHECK_I(fdi_lanes);
13166         PIPE_CONF_CHECK_M_N(fdi_m_n);
13167
13168         PIPE_CONF_CHECK_I(lane_count);
13169         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13170
13171         if (INTEL_GEN(dev_priv) < 8) {
13172                 PIPE_CONF_CHECK_M_N(dp_m_n);
13173
13174                 if (current_config->has_drrs)
13175                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
13176         } else
13177                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13178
13179         PIPE_CONF_CHECK_X(output_types);
13180
13181         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13182         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13183         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13184         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13185         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13186         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13187
13188         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13189         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13190         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13191         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13192         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13193         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13194
13195         PIPE_CONF_CHECK_I(pixel_multiplier);
13196         PIPE_CONF_CHECK_I(output_format);
13197         PIPE_CONF_CHECK_I(dc3co_exitline);
13198         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13199         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13200             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13201                 PIPE_CONF_CHECK_BOOL(limited_color_range);
13202
13203         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13204         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13205         PIPE_CONF_CHECK_BOOL(has_infoframe);
13206         PIPE_CONF_CHECK_BOOL(fec_enable);
13207
13208         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13209
13210         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13211                               DRM_MODE_FLAG_INTERLACE);
13212
13213         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13214                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13215                                       DRM_MODE_FLAG_PHSYNC);
13216                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13217                                       DRM_MODE_FLAG_NHSYNC);
13218                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13219                                       DRM_MODE_FLAG_PVSYNC);
13220                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13221                                       DRM_MODE_FLAG_NVSYNC);
13222         }
13223
13224         PIPE_CONF_CHECK_X(gmch_pfit.control);
13225         /* pfit ratios are autocomputed by the hw on gen4+ */
13226         if (INTEL_GEN(dev_priv) < 4)
13227                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13228         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13229
13230         /*
13231          * Changing the EDP transcoder input mux
13232          * (A_ONOFF vs. A_ON) requires a full modeset.
13233          */
13234         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13235
13236         if (!fastset) {
13237                 PIPE_CONF_CHECK_I(pipe_src_w);
13238                 PIPE_CONF_CHECK_I(pipe_src_h);
13239
13240                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13241                 if (current_config->pch_pfit.enabled) {
13242                         PIPE_CONF_CHECK_X(pch_pfit.pos);
13243                         PIPE_CONF_CHECK_X(pch_pfit.size);
13244                 }
13245
13246                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13247                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13248
13249                 PIPE_CONF_CHECK_X(gamma_mode);
13250                 if (IS_CHERRYVIEW(dev_priv))
13251                         PIPE_CONF_CHECK_X(cgm_mode);
13252                 else
13253                         PIPE_CONF_CHECK_X(csc_mode);
13254                 PIPE_CONF_CHECK_BOOL(gamma_enable);
13255                 PIPE_CONF_CHECK_BOOL(csc_enable);
13256
13257                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13258                 if (bp_gamma)
13259                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13260
13261         }
13262
13263         PIPE_CONF_CHECK_BOOL(double_wide);
13264
13265         PIPE_CONF_CHECK_P(shared_dpll);
13266         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13267         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13268         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13269         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13270         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13271         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13272         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13273         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13274         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13275         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13276         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13277         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13278         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13279         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13280         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13281         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13282         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13283         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13284         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13285         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13286         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13287         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13288         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13289         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13290         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13291         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13292         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13293         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13294         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13295         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13296         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13297
13298         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13299         PIPE_CONF_CHECK_X(dsi_pll.div);
13300
13301         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13302                 PIPE_CONF_CHECK_I(pipe_bpp);
13303
13304         PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
13305         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13306
13307         PIPE_CONF_CHECK_I(min_voltage_level);
13308
13309         PIPE_CONF_CHECK_X(infoframes.enable);
13310         PIPE_CONF_CHECK_X(infoframes.gcp);
13311         PIPE_CONF_CHECK_INFOFRAME(avi);
13312         PIPE_CONF_CHECK_INFOFRAME(spd);
13313         PIPE_CONF_CHECK_INFOFRAME(hdmi);
13314         PIPE_CONF_CHECK_INFOFRAME(drm);
13315
13316         PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
13317         PIPE_CONF_CHECK_I(master_transcoder);
13318
13319 #undef PIPE_CONF_CHECK_X
13320 #undef PIPE_CONF_CHECK_I
13321 #undef PIPE_CONF_CHECK_BOOL
13322 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13323 #undef PIPE_CONF_CHECK_P
13324 #undef PIPE_CONF_CHECK_FLAGS
13325 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13326 #undef PIPE_CONF_CHECK_COLOR_LUT
13327 #undef PIPE_CONF_QUIRK
13328
13329         return ret;
13330 }
13331
13332 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13333                                            const struct intel_crtc_state *pipe_config)
13334 {
13335         if (pipe_config->has_pch_encoder) {
13336                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13337                                                             &pipe_config->fdi_m_n);
13338                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13339
13340                 /*
13341                  * FDI already provided one idea for the dotclock.
13342                  * Yell if the encoder disagrees.
13343                  */
13344                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13345                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13346                      fdi_dotclock, dotclock);
13347         }
13348 }
13349
13350 static void verify_wm_state(struct intel_crtc *crtc,
13351                             struct intel_crtc_state *new_crtc_state)
13352 {
13353         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13354         struct skl_hw_state {
13355                 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
13356                 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
13357                 struct skl_ddb_allocation ddb;
13358                 struct skl_pipe_wm wm;
13359         } *hw;
13360         struct skl_ddb_allocation *sw_ddb;
13361         struct skl_pipe_wm *sw_wm;
13362         struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
13363         const enum pipe pipe = crtc->pipe;
13364         int plane, level, max_level = ilk_wm_max_level(dev_priv);
13365
13366         if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
13367                 return;
13368
13369         hw = kzalloc(sizeof(*hw), GFP_KERNEL);
13370         if (!hw)
13371                 return;
13372
13373         skl_pipe_wm_get_hw_state(crtc, &hw->wm);
13374         sw_wm = &new_crtc_state->wm.skl.optimal;
13375
13376         skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
13377
13378         skl_ddb_get_hw_state(dev_priv, &hw->ddb);
13379         sw_ddb = &dev_priv->wm.skl_hw.ddb;
13380
13381         if (INTEL_GEN(dev_priv) >= 11 &&
13382             hw->ddb.enabled_slices != sw_ddb->enabled_slices)
13383                 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
13384                           sw_ddb->enabled_slices,
13385                           hw->ddb.enabled_slices);
13386
13387         /* planes */
13388         for_each_universal_plane(dev_priv, pipe, plane) {
13389                 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13390
13391                 hw_plane_wm = &hw->wm.planes[plane];
13392                 sw_plane_wm = &sw_wm->planes[plane];
13393
13394                 /* Watermarks */
13395                 for (level = 0; level <= max_level; level++) {
13396                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13397                                                 &sw_plane_wm->wm[level]))
13398                                 continue;
13399
13400                         DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13401                                   pipe_name(pipe), plane + 1, level,
13402                                   sw_plane_wm->wm[level].plane_en,
13403                                   sw_plane_wm->wm[level].plane_res_b,
13404                                   sw_plane_wm->wm[level].plane_res_l,
13405                                   hw_plane_wm->wm[level].plane_en,
13406                                   hw_plane_wm->wm[level].plane_res_b,
13407                                   hw_plane_wm->wm[level].plane_res_l);
13408                 }
13409
13410                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13411                                          &sw_plane_wm->trans_wm)) {
13412                         DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13413                                   pipe_name(pipe), plane + 1,
13414                                   sw_plane_wm->trans_wm.plane_en,
13415                                   sw_plane_wm->trans_wm.plane_res_b,
13416                                   sw_plane_wm->trans_wm.plane_res_l,
13417                                   hw_plane_wm->trans_wm.plane_en,
13418                                   hw_plane_wm->trans_wm.plane_res_b,
13419                                   hw_plane_wm->trans_wm.plane_res_l);
13420                 }
13421
13422                 /* DDB */
13423                 hw_ddb_entry = &hw->ddb_y[plane];
13424                 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
13425
13426                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13427                         DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
13428                                   pipe_name(pipe), plane + 1,
13429                                   sw_ddb_entry->start, sw_ddb_entry->end,
13430                                   hw_ddb_entry->start, hw_ddb_entry->end);
13431                 }
13432         }
13433
13434         /*
13435          * cursor
13436          * If the cursor plane isn't active, we may not have updated it's ddb
13437          * allocation. In that case since the ddb allocation will be updated
13438          * once the plane becomes visible, we can skip this check
13439          */
13440         if (1) {
13441                 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13442
13443                 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
13444                 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
13445
13446                 /* Watermarks */
13447                 for (level = 0; level <= max_level; level++) {
13448                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13449                                                 &sw_plane_wm->wm[level]))
13450                                 continue;
13451
13452                         DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13453                                   pipe_name(pipe), level,
13454                                   sw_plane_wm->wm[level].plane_en,
13455                                   sw_plane_wm->wm[level].plane_res_b,
13456                                   sw_plane_wm->wm[level].plane_res_l,
13457                                   hw_plane_wm->wm[level].plane_en,
13458                                   hw_plane_wm->wm[level].plane_res_b,
13459                                   hw_plane_wm->wm[level].plane_res_l);
13460                 }
13461
13462                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13463                                          &sw_plane_wm->trans_wm)) {
13464                         DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13465                                   pipe_name(pipe),
13466                                   sw_plane_wm->trans_wm.plane_en,
13467                                   sw_plane_wm->trans_wm.plane_res_b,
13468                                   sw_plane_wm->trans_wm.plane_res_l,
13469                                   hw_plane_wm->trans_wm.plane_en,
13470                                   hw_plane_wm->trans_wm.plane_res_b,
13471                                   hw_plane_wm->trans_wm.plane_res_l);
13472                 }
13473
13474                 /* DDB */
13475                 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
13476                 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
13477
13478                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13479                         DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
13480                                   pipe_name(pipe),
13481                                   sw_ddb_entry->start, sw_ddb_entry->end,
13482                                   hw_ddb_entry->start, hw_ddb_entry->end);
13483                 }
13484         }
13485
13486         kfree(hw);
13487 }
13488
/*
 * Verify the sw state of every connector in @state that is assigned
 * to @crtc: run the connector's own consistency check and warn if the
 * atomic best_encoder disagrees with the legacy encoder pointer.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
                       struct intel_crtc *crtc)
{
        struct drm_connector *connector;
        struct drm_connector_state *new_conn_state;
        int i;

        for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
                struct drm_encoder *encoder = connector->encoder;
                struct intel_crtc_state *crtc_state = NULL;

                /* Only consider connectors driven by this crtc. */
                if (new_conn_state->crtc != &crtc->base)
                        continue;

                /*
                 * NOTE(review): the NULL check implies this may be
                 * called with crtc == NULL (verifying disabled
                 * connectors); then no crtc state is passed down —
                 * confirm against the callers.
                 */
                if (crtc)
                        crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

                intel_connector_verify_state(crtc_state, new_conn_state);

                I915_STATE_WARN(new_conn_state->best_encoder != encoder,
                     "connector's atomic encoder doesn't match legacy encoder\n");
        }
}
13513
/*
 * Verify every encoder's sw state against the connector states in
 * @state and against the hardware:
 *  - a connector's crtc must match its encoder's crtc;
 *  - an encoder must have a crtc iff some connector (new state) uses it;
 *  - an encoder with no crtc must actually be off in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                /*
                 * found: some connector referenced this encoder in its
                 * old or new state (i.e. the encoder is part of this
                 * commit and worth checking).
                 * enabled: a connector uses it in the new state.
                 */
                bool enabled = false, found = false;
                enum pipe pipe;

                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
                              encoder->base.base.id,
                              encoder->base.name);

                for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                /* Encoder untouched by this commit — nothing to verify. */
                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                /*
                 * A detached encoder must also be off in hw; probe the
                 * actual hardware state to confirm.
                 */
                if (!encoder->base.crtc) {
                        bool active;

                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
13562
/*
 * Verify a crtc's committed sw state (@new_crtc_state) against the
 * actual hardware state.
 *
 * @old_crtc_state is destroyed and its memory reused as scratch
 * storage for the hw readout (it is no longer needed after the
 * commit), so it must not be referenced by the caller afterwards.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
                  struct intel_crtc_state *old_crtc_state,
                  struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config;
        struct drm_atomic_state *state;
        bool active;

        /* Free the old state's contents, keeping the allocation... */
        state = old_crtc_state->uapi.state;
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
        intel_crtc_free_hw_state(old_crtc_state);

        /* ...and recycle it as a zeroed scratch state for hw readout. */
        pipe_config = old_crtc_state;
        memset(pipe_config, 0, sizeof(*pipe_config));
        pipe_config->uapi.crtc = &crtc->base;
        pipe_config->uapi.state = state;

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

        /* Read the pipe's hw configuration; returns whether it is active. */
        active = dev_priv->display.get_pipe_config(crtc, pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                active = new_crtc_state->hw.active;

        I915_STATE_WARN(new_crtc_state->hw.active != active,
                        "crtc active state doesn't match with hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, active);

        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
                        "transitional active state does not match atomic hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, crtc->active);

        /* Each encoder on the crtc must agree on active state and pipe. */
        for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
                enum pipe pipe;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->hw.active,
                                "[ENCODER:%i] active %i with crtc active %i\n",
                                encoder->base.base.id, active,
                                new_crtc_state->hw.active);

                I915_STATE_WARN(active && crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                /* Let active encoders add their config to the readout. */
                if (active)
                        encoder->get_config(encoder, pipe_config);
        }

        intel_crtc_compute_pixel_rate(pipe_config);

        /* Nothing further to compare against an inactive crtc. */
        if (!new_crtc_state->hw.active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        /* Full (non-fastset) comparison of sw state vs hw readout. */
        if (!intel_pipe_config_compare(new_crtc_state,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
        }
}
13633
/*
 * Assert that every plane in @state whose uapi state says it is visible
 * is actually enabled in hardware. Planar slave planes are asserted
 * enabled regardless of uapi visibility (presumably the master plane
 * carries the uapi visibility for the pair — NOTE(review): confirm).
 */
static void
intel_verify_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane,
					  plane_state, i)
		assert_plane(plane, plane_state->planar_slave ||
			     plane_state->uapi.visible);
}
13646
/*
 * Cross-check the software tracking of one shared DPLL against the
 * hardware state read back through the pll's get_hw_state() hook.
 *
 * @crtc/@new_crtc_state may be NULL (see verify_disabled_dpll_state()),
 * in which case only the pll's global refcount bookkeeping is checked;
 * otherwise the pll's active/enabled crtc masks are also verified
 * against this crtc's expected state.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls have no meaningful on/active_mask tracking. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	/* No crtc: only the global refcount consistency can be checked. */
	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/*
	 * An active crtc must appear in the pll's active mask, an
	 * inactive one must not.
	 */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* If the pll is on, its sw hw_state must match what we read back. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13701
13702 static void
13703 verify_shared_dpll_state(struct intel_crtc *crtc,
13704                          struct intel_crtc_state *old_crtc_state,
13705                          struct intel_crtc_state *new_crtc_state)
13706 {
13707         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13708
13709         if (new_crtc_state->shared_dpll)
13710                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13711
13712         if (old_crtc_state->shared_dpll &&
13713             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13714                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13715                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13716
13717                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13718                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13719                                 pipe_name(drm_crtc_index(&crtc->base)));
13720                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13721                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13722                                 pipe_name(drm_crtc_index(&crtc->base)));
13723         }
13724 }
13725
/*
 * Run the full set of post-commit state checkers (watermarks,
 * connectors, crtc, shared dplls) for a crtc that just went through a
 * full modeset or a fastset. Crtcs that changed in neither way are
 * skipped — their state was not touched.
 */
static void
intel_modeset_verify_crtc(struct intel_crtc *crtc,
			  struct intel_atomic_state *state,
			  struct intel_crtc_state *old_crtc_state,
			  struct intel_crtc_state *new_crtc_state)
{
	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
		return;

	verify_wm_state(crtc, new_crtc_state);
	verify_connector_state(state, crtc);
	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
}
13740
/*
 * Verify the global bookkeeping of every shared DPLL, without checking
 * against any particular crtc (crtc/crtc_state passed as NULL).
 */
static void
verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
}
13749
/*
 * Verify the parts of the modeset state not tied to a specific crtc:
 * encoder state, connectors with no crtc, and the shared DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
13758
/*
 * Recompute the vblank timestamping constants and the platform-specific
 * scanline counter offset for @crtc_state's adjusted mode. See the long
 * comment below for why the offset differs per platform/output type.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
13811
/*
 * Release the shared DPLL references of every crtc in @state that is
 * undergoing a full modeset. No-op on platforms without a
 * crtc_compute_clock hook (i.e. no shared pll management here).
 */
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		intel_release_shared_dplls(state, crtc);
	}
}
13829
13830 /*
13831  * This implements the workaround described in the "notes" section of the mode
13832  * set sequence documentation. When going from no pipes or single pipe to
13833  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13834  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13835  */
13836 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
13837 {
13838         struct intel_crtc_state *crtc_state;
13839         struct intel_crtc *crtc;
13840         struct intel_crtc_state *first_crtc_state = NULL;
13841         struct intel_crtc_state *other_crtc_state = NULL;
13842         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13843         int i;
13844
13845         /* look at all crtc's that are going to be enabled in during modeset */
13846         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13847                 if (!crtc_state->hw.active ||
13848                     !needs_modeset(crtc_state))
13849                         continue;
13850
13851                 if (first_crtc_state) {
13852                         other_crtc_state = crtc_state;
13853                         break;
13854                 } else {
13855                         first_crtc_state = crtc_state;
13856                         first_pipe = crtc->pipe;
13857                 }
13858         }
13859
13860         /* No workaround needed? */
13861         if (!first_crtc_state)
13862                 return 0;
13863
13864         /* w/a possibly needed, check how many crtc's are already enabled. */
13865         for_each_intel_crtc(state->base.dev, crtc) {
13866                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13867                 if (IS_ERR(crtc_state))
13868                         return PTR_ERR(crtc_state);
13869
13870                 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
13871
13872                 if (!crtc_state->hw.active ||
13873                     needs_modeset(crtc_state))
13874                         continue;
13875
13876                 /* 2 or more enabled crtcs means no need for w/a */
13877                 if (enabled_pipe != INVALID_PIPE)
13878                         return 0;
13879
13880                 enabled_pipe = crtc->pipe;
13881         }
13882
13883         if (enabled_pipe != INVALID_PIPE)
13884                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13885         else if (other_crtc_state)
13886                 other_crtc_state->hsw_workaround_pipe = first_pipe;
13887
13888         return 0;
13889 }
13890
/*
 * Perform the check-phase work that is only needed when at least one
 * crtc requires a full modeset: recompute the active pipe mask, take
 * the global state lock if active pipes change, recalculate cdclk,
 * release plls of modesetting crtcs, and apply the HSW planes w/a.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	state->modeset = true;
	state->active_pipes = dev_priv->active_pipes;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;

	/* Recompute the active pipe mask from the crtcs in this state. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->hw.active)
			state->active_pipes |= BIT(crtc->pipe);
		else
			state->active_pipes &= ~BIT(crtc->pipe);

		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
			state->active_pipe_changes |= BIT(crtc->pipe);
	}

	/* Changing the set of active pipes requires the global state lock. */
	if (state->active_pipe_changes) {
		ret = intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13935
13936 /*
13937  * Handle calculation of various watermark data at the end of the atomic check
13938  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13939  * handlers to ensure that all derived state has been updated.
13940  */
13941 static int calc_watermark_data(struct intel_atomic_state *state)
13942 {
13943         struct drm_device *dev = state->base.dev;
13944         struct drm_i915_private *dev_priv = to_i915(dev);
13945
13946         /* Is there platform-specific watermark information to calculate? */
13947         if (dev_priv->display.compute_global_watermarks)
13948                 return dev_priv->display.compute_global_watermarks(state);
13949
13950         return 0;
13951 }
13952
/*
 * If the computed new state is "fuzzy equal" to the old state, demote
 * the pending full modeset to a fastset: clear mode_changed, set
 * update_pipe, and carry over the fuzzy-matched link M/N state so the
 * hardware keeps its current (working) values.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		return;

	new_crtc_state->uapi.mode_changed = false;
	new_crtc_state->update_pipe = true;

	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
13975
/*
 * Pull the state of every plane in @plane_ids_mask on @crtc into
 * @state, so later check-phase code can inspect/modify those planes.
 *
 * Returns 0 on success or a negative error code from
 * intel_atomic_get_plane_state().
 */
static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
					  struct intel_crtc *crtc,
					  u8 plane_ids_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;

		if ((plane_ids_mask & BIT(plane->id)) == 0)
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
13996
/*
 * True on platforms where the number of active planes feeds into the
 * planes' minimum cdclk calculation.
 */
static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
{
	/* See {hsw,vlv,ivb}_plane_ratio() */
	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		IS_IVYBRIDGE(dev_priv);
}
14004
/*
 * Check-phase handling for all planes in @state: run the per-plane
 * atomic checks, NV12/linked-plane fixups, and compute each plane's
 * minimum cdclk. Sets *@need_modeset if any plane's min cdclk demand
 * forces a cdclk (and thus modeset-level) change.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_modeset)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor doesn't count towards the plane ratios. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);

	return 0;
}
14065
/*
 * Run the per-crtc atomic check for every crtc in @state.
 * Returns the first failing crtc's error code, or 0.
 */
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret = intel_crtc_atomic_check(state, crtc);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
					 crtc->base.base.id, crtc->base.name);
			return ret;
		}
	}

	return 0;
}
14083
14084 /**
14085  * intel_atomic_check - validate state object
14086  * @dev: drm device
14087  * @_state: state to validate
14088  */
14089 static int intel_atomic_check(struct drm_device *dev,
14090                               struct drm_atomic_state *_state)
14091 {
14092         struct drm_i915_private *dev_priv = to_i915(dev);
14093         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14094         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14095         struct intel_crtc *crtc;
14096         int ret, i;
14097         bool any_ms = false;
14098
14099         /* Catch I915_MODE_FLAG_INHERITED */
14100         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14101                                             new_crtc_state, i) {
14102                 if (new_crtc_state->hw.mode.private_flags !=
14103                     old_crtc_state->hw.mode.private_flags)
14104                         new_crtc_state->uapi.mode_changed = true;
14105         }
14106
14107         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14108         if (ret)
14109                 goto fail;
14110
14111         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14112                                             new_crtc_state, i) {
14113                 if (!needs_modeset(new_crtc_state)) {
14114                         /* Light copy */
14115                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14116
14117                         continue;
14118                 }
14119
14120                 if (!new_crtc_state->uapi.enable) {
14121                         intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
14122
14123                         any_ms = true;
14124                         continue;
14125                 }
14126
14127                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14128                 if (ret)
14129                         goto fail;
14130
14131                 ret = intel_modeset_pipe_config(new_crtc_state);
14132                 if (ret)
14133                         goto fail;
14134
14135                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14136
14137                 if (needs_modeset(new_crtc_state))
14138                         any_ms = true;
14139         }
14140
14141         if (any_ms && !check_digital_port_conflicts(state)) {
14142                 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
14143                 ret = EINVAL;
14144                 goto fail;
14145         }
14146
14147         ret = drm_dp_mst_atomic_check(&state->base);
14148         if (ret)
14149                 goto fail;
14150
14151         any_ms |= state->cdclk.force_min_cdclk_changed;
14152
14153         ret = intel_atomic_check_planes(state, &any_ms);
14154         if (ret)
14155                 goto fail;
14156
14157         if (any_ms) {
14158                 ret = intel_modeset_checks(state);
14159                 if (ret)
14160                         goto fail;
14161         } else {
14162                 state->cdclk.logical = dev_priv->cdclk.logical;
14163         }
14164
14165         ret = intel_atomic_check_crtcs(state);
14166         if (ret)
14167                 goto fail;
14168
14169         intel_fbc_choose_crtc(dev_priv, state);
14170         ret = calc_watermark_data(state);
14171         if (ret)
14172                 goto fail;
14173
14174         ret = intel_bw_atomic_check(state);
14175         if (ret)
14176                 goto fail;
14177
14178         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14179                                             new_crtc_state, i) {
14180                 if (!needs_modeset(new_crtc_state) &&
14181                     !new_crtc_state->update_pipe)
14182                         continue;
14183
14184                 intel_dump_pipe_config(new_crtc_state, state,
14185                                        needs_modeset(new_crtc_state) ?
14186                                        "[modeset]" : "[fastset]");
14187         }
14188
14189         return 0;
14190
14191  fail:
14192         if (ret == -EDEADLK)
14193                 return ret;
14194
14195         /*
14196          * FIXME would probably be nice to know which crtc specifically
14197          * caused the failure, in cases where we can pinpoint it.
14198          */
14199         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14200                                             new_crtc_state, i)
14201                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14202
14203         return ret;
14204 }
14205
/*
 * Prepare all planes for commit via the drm atomic helper (pins
 * framebuffers etc. through the plane prepare_fb hooks).
 */
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(state->base.dev,
						&state->base);
}
14211
/*
 * Return the current vblank counter for @crtc, falling back to the
 * software-accurate count when the hardware has no usable counter
 * (max_vblank_count == 0).
 */
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];

	if (!vblank->max_vblank_count)
		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);

	return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
14222
/*
 * Enable FIFO underrun reporting for @crtc (and its PCH transcoder,
 * if any). On gen2 this is skipped while no planes are active —
 * NOTE(review): presumably gen2 reports spurious underruns with all
 * planes off; confirm against the underrun handling code.
 */
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}
14238
/*
 * Apply the pipe-level pieces of a fastset: pipe source size, panel
 * fitter/scaler state, and (gen11+) the pipe chicken bits.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
14271
/*
 * Program the per-pipe configuration during the commit phase. For full
 * modesets this was already done when the crtc was enabled, so only
 * fastset-style updates (color management, scalers, pipe misc, pipe
 * size/pfit) are applied here; watermarks are updated in both cases.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc_state *old_crtc_state,
			       struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
14302
/*
 * Commit the new state of one crtc: enable it (modeset) or prepare the
 * fastset update, handle FBC, then program planes and pipe config
 * inside the vblank-evasion critical section.
 */
static void intel_update_crtc(struct intel_crtc *crtc,
			      struct intel_atomic_state *state,
			      struct intel_crtc_state *old_crtc_state,
			      struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));

	if (modeset) {
		intel_crtc_update_active_timings(new_crtc_state);

		dev_priv->display.crtc_enable(state, crtc);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(crtc);
	} else {
		/* Fastset path: preload LUTs before the update if requested. */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, old_crtc_state, new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14360
/*
 * Look up the (single) port-sync slave crtc of @new_crtc_state.
 * sync_mode_slaves_mask must contain exactly one bit; the transcoder
 * index is mapped 1:1 onto a pipe to find the crtc.
 */
static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
	enum transcoder slave_transcoder;

	WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));

	slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
	return intel_get_crtc_for_pipe(dev_priv,
				       (enum pipe)slave_transcoder);
}
14372
/*
 * Fully disable a CRTC during a modeset: tear down its planes, pipe
 * CRC, the pipe itself, FBC and its shared DPLL reference.
 *
 * Both call sites (in intel_commit_modeset_disables()) run
 * intel_pre_plane_update() before calling this.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
14399
/*
 * Disable every CRTC that undergoes a modeset in this commit.
 *
 * Runs two passes: first all transcoder port sync slaves (which must
 * go down before their masters), then everything else. The 'handled'
 * pipe mask prevents the second pass from disabling a slave twice.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		/* A CRTC that is already off has nothing to disable. */
		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		/* pre_plane_update runs even for CRTCs that were inactive. */
		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
14443
14444 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14445 {
14446         struct intel_crtc *crtc;
14447         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14448         int i;
14449
14450         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14451                 if (!new_crtc_state->hw.active)
14452                         continue;
14453
14454                 intel_update_crtc(crtc, state, old_crtc_state,
14455                                   new_crtc_state);
14456         }
14457 }
14458
14459 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
14460                                               struct intel_atomic_state *state,
14461                                               struct intel_crtc_state *new_crtc_state)
14462 {
14463         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14464
14465         intel_crtc_update_active_timings(new_crtc_state);
14466         dev_priv->display.crtc_enable(state, crtc);
14467         intel_crtc_enable_pipe_crc(crtc);
14468 }
14469
/*
 * Switch the DP encoder driving @crtc out of link-training/Idle by
 * stopping link training (DP_TP_CTL -> Normal). Used during the
 * transcoder port sync enable sequence.
 *
 * NOTE(review): if no connector in the new state targets @crtc, the
 * loop falls through and @conn is used uninitialized (masked by
 * uninitialized_var()) — callers must guarantee a match exists.
 */
static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
				       struct intel_atomic_state *state)
{
	struct drm_connector *uninitialized_var(conn);
	struct drm_connector_state *conn_state;
	struct intel_dp *intel_dp;
	int i;

	/* Find the connector assigned to this CRTC in the new state. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc == &crtc->base)
			break;
	}
	intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
	intel_dp_stop_link_train(intel_dp);
}
14485
/*
 * Post-enable half of a transcoder port sync update: FBC bookkeeping,
 * the vblank-evasion-protected pipe config + plane programming, and
 * first-fastset FIFO underrun arming. Mirrors the tail of
 * intel_update_crtc() but is skl+ only (skl_update_planes_on_crtc()).
 */
static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
					   struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));
	bool modeset = needs_modeset(new_crtc_state);

	/* FBC follows the primary plane's new state. */
	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);
	commit_pipe_config(state, old_crtc_state, new_crtc_state);
	skl_update_planes_on_crtc(state, crtc);
	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14519
/*
 * Enable a transcoder port sync master CRTC together with its slave.
 *
 * Sequence: enable both pipes with DP_TP_CTL left in Idle (slave
 * first), then switch DP_TP_CTL to Normal — slave first, master after
 * a short delay — and finally run the post-enable plane/pipe updates
 * for both.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
					       struct intel_atomic_state *state,
					       struct intel_crtc_state *old_crtc_state,
					       struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
	struct intel_crtc_state *new_slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc_state *old_slave_crtc_state =
		intel_atomic_get_old_crtc_state(state, slave_crtc);

	/* The slave must be part of this commit for the sequence to work. */
	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
		!old_slave_crtc_state);

	DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
		      crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
		      slave_crtc->base.name);

	/* Enable seq for slave with DP_TP_CTL left Idle until the
	 * master is ready
	 */
	intel_crtc_enable_trans_port_sync(slave_crtc,
					  state,
					  new_slave_crtc_state);

	/* Enable seq for master with DP_TP_CTL left Idle */
	intel_crtc_enable_trans_port_sync(crtc,
					  state,
					  new_crtc_state);

	/* Set Slave's DP_TP_CTL to Normal */
	intel_set_dp_tp_ctl_normal(slave_crtc,
				   state);

	/* Set Master's DP_TP_CTL To Normal */
	usleep_range(200, 400);
	intel_set_dp_tp_ctl_normal(crtc,
				   state);

	/* Now do the post crtc enable for all master and slaves */
	intel_post_crtc_enable_updates(slave_crtc,
				       state);
	intel_post_crtc_enable_updates(crtc,
				       state);
}
14565
/*
 * skl+ commit_modeset_enables hook: update CRTCs in an order that
 * guarantees their DDB (data buffer) allocations never overlap in
 * between updates, and enable/disable the 2nd DBuf slice as needed
 * around the updates.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned int updated = 0;
	bool progress;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = state->wm_results.ddb.enabled_slices;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

	/*
	 * Seed the DDB tracking with the current (old) allocations of
	 * pipes that stay active without a full modeset.
	 * NOTE(review): entries[] is indexed by the state index i while
	 * 'updated' uses crtc->pipe — confirm the two always coincide.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active)
			entries[i] = old_crtc_state->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;
			bool vbl_wait = false;
			bool modeset = needs_modeset(new_crtc_state);

			if (updated & BIT(crtc->pipe) || !new_crtc_state->hw.active)
				continue;

			/* Defer this pipe while its new DDB overlaps another pipe's current DDB. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries,
							INTEL_NUM_PIPES(dev_priv), i))
				continue;

			updated |= BIT(pipe);
			entries[i] = new_crtc_state->wm.skl.ddb;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    !modeset &&
			    state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			/*
			 * Port sync: the master's update enables both itself
			 * and its slave; a bare slave is skipped here.
			 */
			if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
				if (is_trans_port_sync_master(new_crtc_state))
					intel_update_trans_port_sync_crtcs(crtc,
									   state,
									   old_crtc_state,
									   new_crtc_state);
				else
					continue;
			} else {
				intel_update_crtc(crtc, state, old_crtc_state,
						  new_crtc_state);
			}

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
14648
14649 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
14650 {
14651         struct intel_atomic_state *state, *next;
14652         struct llist_node *freed;
14653
14654         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
14655         llist_for_each_entry_safe(state, next, freed, freed)
14656                 drm_atomic_state_put(&state->base);
14657 }
14658
14659 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
14660 {
14661         struct drm_i915_private *dev_priv =
14662                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
14663
14664         intel_atomic_helper_free_state(dev_priv);
14665 }
14666
/*
 * Sleep until either the commit fence signals or a modeset-affecting
 * GPU reset (I915_RESET_MODESET) is flagged, whichever happens first.
 *
 * Sleeps on two waitqueues at once — the sw_fence's own queue and the
 * bit-waitqueue of the reset flag — re-arming both before each check
 * so a wakeup on either cannot be missed.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Arm both waiters before testing either condition. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	/* Remove ourselves from both queues regardless of which fired. */
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
14693
14694 static void intel_atomic_cleanup_work(struct work_struct *work)
14695 {
14696         struct drm_atomic_state *state =
14697                 container_of(work, struct drm_atomic_state, commit_work);
14698         struct drm_i915_private *i915 = to_i915(state->dev);
14699
14700         drm_atomic_helper_cleanup_planes(&i915->drm, state);
14701         drm_atomic_helper_commit_cleanup_done(state);
14702         drm_atomic_state_put(state);
14703
14704         intel_atomic_helper_free_state(i915);
14705 }
14706
/*
 * Second half of an atomic commit: program the hardware for the
 * already-swapped-in new state. Runs either inline (blocking commits)
 * or from a workqueue (nonblocking commits), after all fences have
 * signalled. Final cleanup is deferred to intel_atomic_cleanup_work().
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* Bails early too if a modeset-affecting GPU reset starts. */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/*
	 * Grab the power domains each modified CRTC needs up front;
	 * they are released again after verification below.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		/* Raise cdclk before enabling pipes that need it higher. */
		intel_set_cdclk_pre_plane_update(dev_priv,
						 &state->cdclk.actual,
						 &dev_priv->cdclk.actual,
						 state->cdclk.pipe);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more then one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		/* Lower cdclk again now that the pipes are running. */
		intel_set_cdclk_post_plane_update(dev_priv,
						  &state->cdclk.actual,
						  &dev_priv->cdclk.actual,
						  state->cdclk.pipe);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	/* Deferred LUT load for fastsets that did not preload their LUTs. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		/*
		 * NOTE(review): put_domains was filled by crtc->pipe above
		 * but is read by state index i here — confirm these always
		 * coincide for every platform/commit shape.
		 */
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	/* Re-enable SAGV if the new configuration allows it. */
	if (state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
14876
14877 static void intel_atomic_commit_work(struct work_struct *work)
14878 {
14879         struct intel_atomic_state *state =
14880                 container_of(work, struct intel_atomic_state, base.commit_work);
14881
14882         intel_atomic_commit_tail(state);
14883 }
14884
/*
 * i915_sw_fence notify callback for the commit fence.
 *
 * FENCE_COMPLETE needs no action here since the commit path does its
 * own blocking waits; FENCE_FREE queues the state onto the driver's
 * deferred-free list so a worker drops the final reference.
 */
static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		/* we do blocking waits in the worker, nothing to do here */
		break;
	case FENCE_FREE:
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			/* Only kick the worker when the list was empty. */
			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
}
14909
14910 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
14911 {
14912         struct intel_plane_state *old_plane_state, *new_plane_state;
14913         struct intel_plane *plane;
14914         int i;
14915
14916         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
14917                                              new_plane_state, i)
14918                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
14919                                         to_intel_frontbuffer(new_plane_state->hw.fb),
14920                                         plane->frontbuffer_bit);
14921 }
14922
14923 static void assert_global_state_locked(struct drm_i915_private *dev_priv)
14924 {
14925         struct intel_crtc *crtc;
14926
14927         for_each_intel_crtc(&dev_priv->drm, crtc)
14928                 drm_modeset_lock_assert_held(&crtc->base.mutex);
14929 }
14930
/*
 * i915 implementation of the atomic_commit hook.
 *
 * Initializes the commit fence, prepares and swaps in the new state,
 * publishes any global state, then either queues the commit tail on a
 * workqueue (nonblocking) or runs it inline (blocking).
 *
 * Returns 0 on success or a negative error code; on failure all
 * references and the runtime PM wakeref taken here are released.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		/* Commit the fence so FENCE_FREE can eventually fire. */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);

	if (ret) {
		i915_sw_fence_commit(&state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Publish the new global state; requires all CRTC locks held. */
	if (state->global_state_changed) {
		assert_global_state_locked(dev_priv);

		memcpy(dev_priv->min_cdclk, state->min_cdclk,
		       sizeof(state->min_cdclk));
		memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
		       sizeof(state->min_voltage_level));
		dev_priv->active_pipes = state->active_pipes;
		dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;

		intel_cdclk_swap_state(state);
	}

	/* Extra reference, dropped by intel_atomic_cleanup_work(). */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking path: flush pending modesets, then run inline. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
15025
/*
 * Waiter armed on a CRTC's vblank waitqueue to boost the GPU clocks
 * for a not-yet-started request after a missed vblank; fired and freed
 * by do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the vblank waitqueue */

	struct drm_crtc *crtc;		/* holds a vblank ref until the boost fires */
	struct i915_request *request;	/* request to boost; we own a reference */
};
15032
/*
 * Vblank waitqueue callback: if the request blocking a flip has not started
 * executing by the time the vblank fires, boost the GPU frequency so it
 * finishes sooner, then tear down the one-shot wait entry.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	/* Drop the reference taken in add_rps_boost_after_vblank(). */
	i915_request_put(rq);

	/* Release the vblank reference taken when the waiter was armed. */
	drm_crtc_vblank_put(wait->crtc);

	/* One-shot: remove ourselves from the waitqueue and free. */
	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
15054
/*
 * Arm a one-shot boost on @crtc's vblank waitqueue for the i915 request
 * behind @fence: do_rps_boost() fires at the next vblank and bumps the GPU
 * frequency if the request still hasn't started. Best effort only — any
 * failure along the way simply means no boost.
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	/* Only i915-backed fences carry a request we can boost. */
	if (!dma_fence_is_i915(fence))
		return;

	/* No RPS boosting on gen < 6. */
	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
		return;

	/* Hold a vblank reference; dropped again in do_rps_boost(). */
	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		/* Allocation failed: undo the vblank reference and bail. */
		drm_crtc_vblank_put(crtc);
		return;
	}

	/* Reference on the request is dropped in do_rps_boost(). */
	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
15083
/*
 * Pin @plane_state's fb for scanout and record the resulting vma in the
 * plane state. Cursor planes on platforms that require a physical address
 * are first attached to a phys object. Returns 0 or a negative error code.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* Released later via intel_plane_unpin_fb(). */
	plane_state->vma = vma;

	return 0;
}
15113
15114 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15115 {
15116         struct i915_vma *vma;
15117
15118         vma = fetch_and_zero(&old_plane_state->vma);
15119         if (vma)
15120                 intel_unpin_fb_vma(vma, old_plane_state->flags);
15121 }
15122
/*
 * Raise the scheduling priority of work targeting a scanout object so
 * rendering destined for the display is not starved by other clients.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
15131
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @_new_plane_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * The commit is gated on @_new_plane_state's explicit fence when one was
 * supplied, or on the fb object's reservation otherwise (implicit fencing).
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *_new_plane_state)
{
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_plane_state->hw.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (old_obj) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(intel_state,
							to_intel_crtc(plane->state->crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->base.resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_plane_state->uapi.fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_plane_state->uapi.fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	/* Keep the backing pages resident across the pin below. */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = intel_plane_pin_fb(new_plane_state);

	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	fb_obj_bump_render_priority(obj);
	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);

	if (!new_plane_state->uapi.fence) { /* implicit fencing */
		struct dma_fence *fence;

		/* Gate the commit on the object's reservation instead. */
		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->base.resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		fence = dma_resv_get_excl_rcu(obj->base.resv);
		if (fence) {
			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
						   fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
					   new_plane_state->uapi.fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!intel_state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
		intel_state->rps_interactive = true;
	}

	return 0;
}
15245
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @_old_plane_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane:
 * drops the interactive RPS bias taken in intel_prepare_plane_fb()
 * and unpins the old framebuffer's vma.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *_old_plane_state)
{
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(_old_plane_state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(old_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);

	/* Undo the interactive marking made in intel_prepare_plane_fb(). */
	if (intel_state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
		intel_state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	intel_plane_unpin_fb(old_plane_state);
}
15271
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite). Unregisters the drm plane and frees the embedding intel_plane.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
15284
15285 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15286                                             u32 format, u64 modifier)
15287 {
15288         switch (modifier) {
15289         case DRM_FORMAT_MOD_LINEAR:
15290         case I915_FORMAT_MOD_X_TILED:
15291                 break;
15292         default:
15293                 return false;
15294         }
15295
15296         switch (format) {
15297         case DRM_FORMAT_C8:
15298         case DRM_FORMAT_RGB565:
15299         case DRM_FORMAT_XRGB1555:
15300         case DRM_FORMAT_XRGB8888:
15301                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15302                         modifier == I915_FORMAT_MOD_X_TILED;
15303         default:
15304                 return false;
15305         }
15306 }
15307
15308 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15309                                             u32 format, u64 modifier)
15310 {
15311         switch (modifier) {
15312         case DRM_FORMAT_MOD_LINEAR:
15313         case I915_FORMAT_MOD_X_TILED:
15314                 break;
15315         default:
15316                 return false;
15317         }
15318
15319         switch (format) {
15320         case DRM_FORMAT_C8:
15321         case DRM_FORMAT_RGB565:
15322         case DRM_FORMAT_XRGB8888:
15323         case DRM_FORMAT_XBGR8888:
15324         case DRM_FORMAT_ARGB8888:
15325         case DRM_FORMAT_ABGR8888:
15326         case DRM_FORMAT_XRGB2101010:
15327         case DRM_FORMAT_XBGR2101010:
15328         case DRM_FORMAT_ARGB2101010:
15329         case DRM_FORMAT_ABGR2101010:
15330         case DRM_FORMAT_XBGR16161616F:
15331                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15332                         modifier == I915_FORMAT_MOD_X_TILED;
15333         default:
15334                 return false;
15335         }
15336 }
15337
15338 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
15339                                               u32 format, u64 modifier)
15340 {
15341         return modifier == DRM_FORMAT_MOD_LINEAR &&
15342                 format == DRM_FORMAT_ARGB8888;
15343 }
15344
/* Plane vtable used for gen4+ primary planes (see intel_primary_plane_create). */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
15353
/* Plane vtable used for gen2/3 primary planes (see intel_primary_plane_create). */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
15362
/*
 * Legacy cursor update fastpath: update the cursor plane without going
 * through a full atomic commit when only the fb or position changed on an
 * active, otherwise-idle crtc. Anything that could affect watermarks or
 * that races with a pending commit falls back to the atomic slowpath.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	/* Scratch crtc state for the atomic check below; never swapped in. */
	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* On success the old plane state is freed, on failure the new one. */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
15485
/* Cursor plane vtable; .update_plane goes through the legacy fastpath. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
15494
15495 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
15496                                enum i9xx_plane_id i9xx_plane)
15497 {
15498         if (!HAS_FBC(dev_priv))
15499                 return false;
15500
15501         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15502                 return i9xx_plane == PLANE_A; /* tied to pipe A */
15503         else if (IS_IVYBRIDGE(dev_priv))
15504                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
15505                         i9xx_plane == PLANE_C;
15506         else if (INTEL_GEN(dev_priv) >= 4)
15507                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
15508         else
15509                 return i9xx_plane == PLANE_A;
15510 }
15511
/*
 * Allocate and register the primary plane for @pipe. On gen9+ this is
 * delegated to the universal plane code; on older platforms the format
 * list, vtable and hooks are chosen per generation here. Returns the new
 * plane or an ERR_PTR.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		/* Let FBC know it may compress from this plane. */
		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pick the per-generation format list. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	/* Per-platform minimum cdclk calculation for this plane. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	possible_crtcs = BIT(pipe);

	/* Name follows the pipe on gen5+/g4x, the hw plane otherwise. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary plane sits at the bottom of the zpos stack. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
15647
/*
 * Allocate and register the cursor plane for @pipe, selecting the i845 or
 * i9xx cursor hooks per platform. Returns the new plane or an ERR_PTR.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* Invalidate the cached register values so the first update writes them. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor sits above all sprites in the zpos stack. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
15715
15716 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
15717                                     struct intel_crtc_state *crtc_state)
15718 {
15719         struct intel_crtc_scaler_state *scaler_state =
15720                 &crtc_state->scaler_state;
15721         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15722         int i;
15723
15724         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
15725         if (!crtc->num_scalers)
15726                 return;
15727
15728         for (i = 0; i < crtc->num_scalers; i++) {
15729                 struct intel_scaler *scaler = &scaler_state->scalers[i];
15730
15731                 scaler->in_use = 0;
15732                 scaler->mode = 0;
15733         }
15734
15735         scaler_state->scaler_id = -1;
15736 }
15737
/*
 * Funcs shared by every i915 crtc vtable below; each variant only adds
 * the generation-appropriate vblank counter/enable/disable hooks.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
15748
/* CRTC vtable: g4x hw vblank counter with bdw vblank irq hooks. */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};
15756
/* CRTC vtable: g4x hw vblank counter with ilk vblank irq hooks. */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};
15764
/* CRTC vtable: g4x hw vblank counter with i965 vblank irq hooks. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};
15772
15773 static const struct drm_crtc_funcs i965_crtc_funcs = {
15774         INTEL_CRTC_FUNCS,
15775
15776         .get_vblank_counter = i915_get_vblank_counter,
15777         .enable_vblank = i965_enable_vblank,
15778         .disable_vblank = i965_disable_vblank,
15779 };
15780
15781 static const struct drm_crtc_funcs i915gm_crtc_funcs = {
15782         INTEL_CRTC_FUNCS,
15783
15784         .get_vblank_counter = i915_get_vblank_counter,
15785         .enable_vblank = i915gm_enable_vblank,
15786         .disable_vblank = i915gm_disable_vblank,
15787 };
15788
15789 static const struct drm_crtc_funcs i915_crtc_funcs = {
15790         INTEL_CRTC_FUNCS,
15791
15792         .get_vblank_counter = i915_get_vblank_counter,
15793         .enable_vblank = i8xx_enable_vblank,
15794         .disable_vblank = i8xx_disable_vblank,
15795 };
15796
15797 static const struct drm_crtc_funcs i8xx_crtc_funcs = {
15798         INTEL_CRTC_FUNCS,
15799
15800         /* no hw vblank counter */
15801         .enable_vblank = i8xx_enable_vblank,
15802         .disable_vblank = i8xx_disable_vblank,
15803 };
15804
/*
 * Allocate and register the CRTC for @pipe, together with its primary,
 * sprite and cursor planes, then wire up the pipe/plane lookup tables
 * and color management.
 *
 * Returns 0 on success or a negative errno. On failure the local
 * allocations are freed here; planes already registered with drm core
 * are cleaned up later by drm_mode_config_cleanup().
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        const struct drm_crtc_funcs *funcs;
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *crtc_state = NULL;
        struct intel_plane *primary = NULL;
        struct intel_plane *cursor = NULL;
        int sprite, ret;

        intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
        if (!intel_crtc)
                return -ENOMEM;

        crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        if (!crtc_state) {
                ret = -ENOMEM;
                goto fail;
        }
        /* Install crtc_state->uapi as the crtc's atomic state. */
        __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->uapi);
        intel_crtc->config = crtc_state;

        primary = intel_primary_plane_create(dev_priv, pipe);
        if (IS_ERR(primary)) {
                ret = PTR_ERR(primary);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(primary->id);

        for_each_sprite(dev_priv, pipe, sprite) {
                struct intel_plane *plane;

                plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                intel_crtc->plane_ids_mask |= BIT(plane->id);
        }

        cursor = intel_cursor_plane_create(dev_priv, pipe);
        if (IS_ERR(cursor)) {
                ret = PTR_ERR(cursor);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(cursor->id);

        /* Pick the platform's vblank hooks (tables defined above). */
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv) ||
                    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
                        funcs = &g4x_crtc_funcs;
                else if (IS_GEN(dev_priv, 4))
                        funcs = &i965_crtc_funcs;
                else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
                        funcs = &i915gm_crtc_funcs;
                else if (IS_GEN(dev_priv, 3))
                        funcs = &i915_crtc_funcs;
                else
                        funcs = &i8xx_crtc_funcs;
        } else {
                if (INTEL_GEN(dev_priv) >= 8)
                        funcs = &bdw_crtc_funcs;
                else
                        funcs = &ilk_crtc_funcs;
        }

        ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
                                        &primary->base, &cursor->base,
                                        funcs, "pipe %c", pipe_name(pipe));
        if (ret)
                goto fail;

        intel_crtc->pipe = pipe;

        /* initialize shared scalers */
        intel_crtc_init_scalers(intel_crtc, crtc_state);

        /* Each pipe must map to exactly one crtc. */
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
               dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
        dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

        if (INTEL_GEN(dev_priv) < 9) {
                enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

                /* Pre-gen9 also tracks the primary plane -> crtc mapping. */
                BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
                       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
                dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
        }

        intel_color_init(intel_crtc);

        /* Much of the driver assumes drm crtc index == pipe. */
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

        return 0;

fail:
        /*
         * drm_mode_config_cleanup() will free up any
         * crtcs/planes already initialized.
         */
        kfree(crtc_state);
        kfree(intel_crtc);

        return ret;
}
15909
15910 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15911                                       struct drm_file *file)
15912 {
15913         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15914         struct drm_crtc *drmmode_crtc;
15915         struct intel_crtc *crtc;
15916
15917         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15918         if (!drmmode_crtc)
15919                 return -ENOENT;
15920
15921         crtc = to_intel_crtc(drmmode_crtc);
15922         pipe_from_crtc_id->pipe = crtc->pipe;
15923
15924         return 0;
15925 }
15926
15927 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
15928 {
15929         struct drm_device *dev = encoder->base.dev;
15930         struct intel_encoder *source_encoder;
15931         u32 possible_clones = 0;
15932
15933         for_each_intel_encoder(dev, source_encoder) {
15934                 if (encoders_cloneable(encoder, source_encoder))
15935                         possible_clones |= drm_encoder_mask(&source_encoder->base);
15936         }
15937
15938         return possible_clones;
15939 }
15940
15941 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
15942 {
15943         struct drm_device *dev = encoder->base.dev;
15944         struct intel_crtc *crtc;
15945         u32 possible_crtcs = 0;
15946
15947         for_each_intel_crtc(dev, crtc) {
15948                 if (encoder->pipe_mask & BIT(crtc->pipe))
15949                         possible_crtcs |= drm_crtc_mask(&crtc->base);
15950         }
15951
15952         return possible_crtcs;
15953 }
15954
15955 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
15956 {
15957         if (!IS_MOBILE(dev_priv))
15958                 return false;
15959
15960         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
15961                 return false;
15962
15963         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
15964                 return false;
15965
15966         return true;
15967 }
15968
15969 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
15970 {
15971         if (INTEL_GEN(dev_priv) >= 9)
15972                 return false;
15973
15974         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
15975                 return false;
15976
15977         if (HAS_PCH_LPT_H(dev_priv) &&
15978             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
15979                 return false;
15980
15981         /* DDI E can't be used if DDI A requires 4 lanes */
15982         if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
15983                 return false;
15984
15985         if (!dev_priv->vbt.int_crt_support)
15986                 return false;
15987
15988         return true;
15989 }
15990
15991 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
15992 {
15993         int pps_num;
15994         int pps_idx;
15995
15996         if (HAS_DDI(dev_priv))
15997                 return;
15998         /*
15999          * This w/a is needed at least on CPT/PPT, but to be sure apply it
16000          * everywhere where registers can be write protected.
16001          */
16002         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16003                 pps_num = 2;
16004         else
16005                 pps_num = 1;
16006
16007         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16008                 u32 val = I915_READ(PP_CONTROL(pps_idx));
16009
16010                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16011                 I915_WRITE(PP_CONTROL(pps_idx), val);
16012         }
16013 }
16014
/*
 * Select the MMIO base of the panel power sequencer registers for this
 * platform and apply the register unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
        if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
                dev_priv->pps_mmio_base = PCH_PPS_BASE;
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->pps_mmio_base = VLV_PPS_BASE;
        else
                dev_priv->pps_mmio_base = PPS_BASE;

        intel_pps_unlock_regs_wa(dev_priv);
}
16026
/*
 * Probe and register all display outputs (encoders) for the device.
 *
 * Port probing is platform specific: newer DDI platforms rely on strap
 * registers and/or the VBT, older platforms poke per-port detect bits.
 * Registration order matters in places (see the LVDS-before-eDP comment
 * in the PCH branch), so be careful when rearranging.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;

        intel_pps_init(dev_priv);

        if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
                return;

        if (INTEL_GEN(dev_priv) >= 12) {
                /* NOTE: port C is intentionally skipped here. */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
                intel_ddi_init(dev_priv, PORT_G);
                intel_ddi_init(dev_priv, PORT_H);
                intel_ddi_init(dev_priv, PORT_I);
                icl_dsi_init(dev_priv);
        } else if (IS_ELKHARTLAKE(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                icl_dsi_init(dev_priv);
        } else if (IS_GEN(dev_priv, 11)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                /*
                 * On some ICL SKUs port F is not present. No strap bits for
                 * this, so rely on VBT.
                 * Work around broken VBTs on SKUs known to have no port F.
                 */
                if (IS_ICL_WITH_PORT_F(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_F))
                        intel_ddi_init(dev_priv, PORT_F);

                icl_dsi_init(dev_priv);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
                 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
                 * detect the ports.
                 */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);

                vlv_dsi_init(dev_priv);
        } else if (HAS_DDI(dev_priv)) {
                int found;

                if (intel_ddi_crt_present(dev_priv))
                        intel_crt_init(dev_priv);

                /*
                 * Haswell uses DDI functions to detect digital outputs.
                 * On SKL pre-D0 the strap isn't connected, so we assume
                 * it's there.
                 */
                found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
                /* WaIgnoreDDIAStrap: skl */
                if (found || IS_GEN9_BC(dev_priv))
                        intel_ddi_init(dev_priv, PORT_A);

                /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
                 * register */
                found = I915_READ(SFUSE_STRAP);

                if (found & SFUSE_STRAP_DDIB_DETECTED)
                        intel_ddi_init(dev_priv, PORT_B);
                if (found & SFUSE_STRAP_DDIC_DETECTED)
                        intel_ddi_init(dev_priv, PORT_C);
                if (found & SFUSE_STRAP_DDID_DETECTED)
                        intel_ddi_init(dev_priv, PORT_D);
                if (found & SFUSE_STRAP_DDIF_DETECTED)
                        intel_ddi_init(dev_priv, PORT_F);
                /*
                 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
                 */
                if (IS_GEN9_BC(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_E))
                        intel_ddi_init(dev_priv, PORT_E);

        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;

                /*
                 * intel_edp_init_connector() depends on this completing first,
                 * to prevent the registration of both eDP and LVDS and the
                 * incorrect sharing of the PPS.
                 */
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);

                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

                if (ilk_has_edp_a(dev_priv))
                        intel_dp_init(dev_priv, DP_A, PORT_A);

                if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
                        if (!found)
                                intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
                }

                if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

                if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                bool has_edp, has_port;

                if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
                        intel_crt_init(dev_priv);

                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
                 * eDP ports may have been muxed to an alternate function.
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
                 *
                 * Sadly the straps seem to be missing sometimes even for HDMI
                 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
                 * and VBT for the presence of the port. Additionally we can't
                 * trust the port type the VBT declares as we've seen at least
                 * HDMI ports that the VBT claim are DP or eDP.
                 */
                has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
                has_port = intel_bios_is_port_present(dev_priv, PORT_B);
                if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
                if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

                has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
                has_port = intel_bios_is_port_present(dev_priv, PORT_C);
                if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
                if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

                if (IS_CHERRYVIEW(dev_priv)) {
                        /*
                         * eDP not supported on port D,
                         * so no need to worry about it
                         */
                        has_port = intel_bios_is_port_present(dev_priv, PORT_D);
                        if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
                                intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
                        if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
                                intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
                }

                vlv_dsi_init(dev_priv);
        } else if (IS_PINEVIEW(dev_priv)) {
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);
        } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                bool found = false;

                if (IS_MOBILE(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
                        if (!found && IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
                        }

                        if (!found && IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_B, PORT_B);
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
                }

                if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

                        if (IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
                        }
                        if (IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_C, PORT_C);
                }

                if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev_priv, DP_D, PORT_D);

                if (SUPPORTS_TV(dev_priv))
                        intel_tv_init(dev_priv);
        } else if (IS_GEN(dev_priv, 2)) {
                if (IS_I85X(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);
                intel_dvo_init(dev_priv);
        }

        intel_psr_init(dev_priv);

        /*
         * All encoders exist now; fill in the legacy possible_crtcs and
         * possible_clones masks on each of them.
         */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                encoder->base.possible_crtcs =
                        intel_encoder_possible_crtcs(encoder);
                encoder->base.possible_clones =
                        intel_encoder_possible_clones(encoder);
        }

        intel_init_pch_refclk(dev_priv);

        drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
16265
16266 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16267 {
16268         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16269
16270         drm_framebuffer_cleanup(fb);
16271         intel_frontbuffer_put(intel_fb->frontbuffer);
16272
16273         kfree(intel_fb);
16274 }
16275
16276 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16277                                                 struct drm_file *file,
16278                                                 unsigned int *handle)
16279 {
16280         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16281
16282         if (obj->userptr.mm) {
16283                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
16284                 return -EINVAL;
16285         }
16286
16287         return drm_gem_handle_create(file, &obj->base, handle);
16288 }
16289
16290 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
16291                                         struct drm_file *file,
16292                                         unsigned flags, unsigned color,
16293                                         struct drm_clip_rect *clips,
16294                                         unsigned num_clips)
16295 {
16296         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16297
16298         i915_gem_object_flush_if_display(obj);
16299         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
16300
16301         return 0;
16302 }
16303
/* vtable for framebuffers wrapping i915 GEM objects */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .destroy = intel_user_framebuffer_destroy,
        .create_handle = intel_user_framebuffer_create_handle,
        .dirty = intel_user_framebuffer_dirty,
};
16309
/*
 * Validate @mode_cmd against the hardware's framebuffer constraints and
 * initialize @intel_fb around @obj.
 *
 * Takes its own frontbuffer reference on @obj; on any failure that
 * reference is dropped again and a negative errno is returned. On
 * success the framebuffer has been registered with drm core and owns
 * the reference.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_framebuffer *fb = &intel_fb->base;
        u32 max_stride;
        unsigned int tiling, stride;
        int ret = -EINVAL;
        int i;

        intel_fb->frontbuffer = intel_frontbuffer_get(obj);
        if (!intel_fb->frontbuffer)
                return -ENOMEM;

        /* Snapshot the object's tiling mode and stride under its lock. */
        i915_gem_object_lock(obj);
        tiling = i915_gem_object_get_tiling(obj);
        stride = i915_gem_object_get_stride(obj);
        i915_gem_object_unlock(obj);

        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
                /*
                 * If there's a fence, enforce that
                 * the fb modifier and tiling mode match.
                 */
                if (tiling != I915_TILING_NONE &&
                    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
                        goto err;
                }
        } else {
                /* Legacy addfb: derive the modifier from the tiling mode. */
                if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
                } else if (tiling == I915_TILING_Y) {
                        DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
                        goto err;
                }
        }

        /* Some plane must be able to scan out this format+modifier combo. */
        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                struct drm_format_name_buf format_name;

                DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
                              drm_get_format_name(mode_cmd->pixel_format,
                                                  &format_name),
                              mode_cmd->modifier[0]);
                goto err;
        }

        /*
         * gen2/3 display engine uses the fence if present,
         * so the tiling mode must match the fb modifier exactly.
         */
        if (INTEL_GEN(dev_priv) < 4 &&
            tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
                goto err;
        }

        max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
                                         mode_cmd->modifier[0]);
        if (mode_cmd->pitches[0] > max_stride) {
                DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
                              mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
                              "tiled" : "linear",
                              mode_cmd->pitches[0], max_stride);
                goto err;
        }

        /*
         * If there's a fence, enforce that
         * the fb pitch and fence stride match.
         */
        if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
                DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
                              mode_cmd->pitches[0], stride);
                goto err;
        }

        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0)
                goto err;

        drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

        /* Per-plane validation; all planes must share the same GEM object. */
        for (i = 0; i < fb->format->num_planes; i++) {
                u32 stride_alignment;

                if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
                        DRM_DEBUG_KMS("bad plane %d handle\n", i);
                        goto err;
                }

                stride_alignment = intel_fb_stride_alignment(fb, i);

                /*
                 * Display WA #0531: skl,bxt,kbl,glk
                 *
                 * Render decompression and plane width > 3840
                 * combined with horizontal panning requires the
                 * plane stride to be a multiple of 4. We'll just
                 * require the entire fb to accommodate that to avoid
                 * potential runtime errors at plane configuration time.
                 */
                if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
                    is_ccs_modifier(fb->modifier))
                        stride_alignment *= 4;

                if (fb->pitches[i] & (stride_alignment - 1)) {
                        DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
                                      i, fb->pitches[i], stride_alignment);
                        goto err;
                }

                fb->obj[i] = &obj->base;
        }

        ret = intel_fill_fb_info(dev_priv, fb);
        if (ret)
                goto err;

        ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                goto err;
        }

        return 0;

err:
        intel_frontbuffer_put(intel_fb->frontbuffer);
        return ret;
}
16445
16446 static struct drm_framebuffer *
16447 intel_user_framebuffer_create(struct drm_device *dev,
16448                               struct drm_file *filp,
16449                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
16450 {
16451         struct drm_framebuffer *fb;
16452         struct drm_i915_gem_object *obj;
16453         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
16454
16455         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
16456         if (!obj)
16457                 return ERR_PTR(-ENOENT);
16458
16459         fb = intel_framebuffer_create(obj, &mode_cmd);
16460         i915_gem_object_put(obj);
16461
16462         return fb;
16463 }
16464
16465 static void intel_atomic_state_free(struct drm_atomic_state *state)
16466 {
16467         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
16468
16469         drm_atomic_state_default_release(state);
16470
16471         i915_sw_fence_fini(&intel_state->commit_ready);
16472
16473         kfree(state);
16474 }
16475
16476 static enum drm_mode_status
16477 intel_mode_valid(struct drm_device *dev,
16478                  const struct drm_display_mode *mode)
16479 {
16480         struct drm_i915_private *dev_priv = to_i915(dev);
16481         int hdisplay_max, htotal_max;
16482         int vdisplay_max, vtotal_max;
16483
16484         /*
16485          * Can't reject DBLSCAN here because Xorg ddxen can add piles
16486          * of DBLSCAN modes to the output's mode list when they detect
16487          * the scaling mode property on the connector. And they don't
16488          * ask the kernel to validate those modes in any way until
16489          * modeset time at which point the client gets a protocol error.
16490          * So in order to not upset those clients we silently ignore the
16491          * DBLSCAN flag on such connectors. For other connectors we will
16492          * reject modes with the DBLSCAN flag in encoder->compute_config().
16493          * And we always reject DBLSCAN modes in connector->mode_valid()
16494          * as we never want such modes on the connector's mode list.
16495          */
16496
16497         if (mode->vscan > 1)
16498                 return MODE_NO_VSCAN;
16499
16500         if (mode->flags & DRM_MODE_FLAG_HSKEW)
16501                 return MODE_H_ILLEGAL;
16502
16503         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
16504                            DRM_MODE_FLAG_NCSYNC |
16505                            DRM_MODE_FLAG_PCSYNC))
16506                 return MODE_HSYNC;
16507
16508         if (mode->flags & (DRM_MODE_FLAG_BCAST |
16509                            DRM_MODE_FLAG_PIXMUX |
16510                            DRM_MODE_FLAG_CLKDIV2))
16511                 return MODE_BAD;
16512
16513         /* Transcoder timing limits */
16514         if (INTEL_GEN(dev_priv) >= 11) {
16515                 hdisplay_max = 16384;
16516                 vdisplay_max = 8192;
16517                 htotal_max = 16384;
16518                 vtotal_max = 8192;
16519         } else if (INTEL_GEN(dev_priv) >= 9 ||
16520                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
16521                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
16522                 vdisplay_max = 4096;
16523                 htotal_max = 8192;
16524                 vtotal_max = 8192;
16525         } else if (INTEL_GEN(dev_priv) >= 3) {
16526                 hdisplay_max = 4096;
16527                 vdisplay_max = 4096;
16528                 htotal_max = 8192;
16529                 vtotal_max = 8192;
16530         } else {
16531                 hdisplay_max = 2048;
16532                 vdisplay_max = 2048;
16533                 htotal_max = 4096;
16534                 vtotal_max = 4096;
16535         }
16536
16537         if (mode->hdisplay > hdisplay_max ||
16538             mode->hsync_start > htotal_max ||
16539             mode->hsync_end > htotal_max ||
16540             mode->htotal > htotal_max)
16541                 return MODE_H_ILLEGAL;
16542
16543         if (mode->vdisplay > vdisplay_max ||
16544             mode->vsync_start > vtotal_max ||
16545             mode->vsync_end > vtotal_max ||
16546             mode->vtotal > vtotal_max)
16547                 return MODE_V_ILLEGAL;
16548
16549         if (INTEL_GEN(dev_priv) >= 5) {
16550                 if (mode->hdisplay < 64 ||
16551                     mode->htotal - mode->hdisplay < 32)
16552                         return MODE_H_ILLEGAL;
16553
16554                 if (mode->vtotal - mode->vdisplay < 5)
16555                         return MODE_V_ILLEGAL;
16556         } else {
16557                 if (mode->htotal - mode->hdisplay < 32)
16558                         return MODE_H_ILLEGAL;
16559
16560                 if (mode->vtotal - mode->vdisplay < 3)
16561                         return MODE_V_ILLEGAL;
16562         }
16563
16564         return MODE_OK;
16565 }
16566
16567 enum drm_mode_status
16568 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16569                                 const struct drm_display_mode *mode)
16570 {
16571         int plane_width_max, plane_height_max;
16572
16573         /*
16574          * intel_mode_valid() should be
16575          * sufficient on older platforms.
16576          */
16577         if (INTEL_GEN(dev_priv) < 9)
16578                 return MODE_OK;
16579
16580         /*
16581          * Most people will probably want a fullscreen
16582          * plane so let's not advertize modes that are
16583          * too big for that.
16584          */
16585         if (INTEL_GEN(dev_priv) >= 11) {
16586                 plane_width_max = 5120;
16587                 plane_height_max = 4320;
16588         } else {
16589                 plane_width_max = 5120;
16590                 plane_height_max = 4096;
16591         }
16592
16593         if (mode->hdisplay > plane_width_max)
16594                 return MODE_H_ILLEGAL;
16595
16596         if (mode->vdisplay > plane_height_max)
16597                 return MODE_V_ILLEGAL;
16598
16599         return MODE_OK;
16600 }
16601
/* Mode-config entry points handed to the drm core (fb creation, mode
 * validation, and the atomic check/commit/state-lifetime hooks). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
16613
16614 /**
16615  * intel_init_display_hooks - initialize the display modesetting hooks
16616  * @dev_priv: device private
16617  */
16618 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
16619 {
16620         intel_init_cdclk_hooks(dev_priv);
16621
16622         if (INTEL_GEN(dev_priv) >= 9) {
16623                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16624                 dev_priv->display.get_initial_plane_config =
16625                         skylake_get_initial_plane_config;
16626                 dev_priv->display.crtc_compute_clock =
16627                         haswell_crtc_compute_clock;
16628                 dev_priv->display.crtc_enable = haswell_crtc_enable;
16629                 dev_priv->display.crtc_disable = haswell_crtc_disable;
16630         } else if (HAS_DDI(dev_priv)) {
16631                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16632                 dev_priv->display.get_initial_plane_config =
16633                         i9xx_get_initial_plane_config;
16634                 dev_priv->display.crtc_compute_clock =
16635                         haswell_crtc_compute_clock;
16636                 dev_priv->display.crtc_enable = haswell_crtc_enable;
16637                 dev_priv->display.crtc_disable = haswell_crtc_disable;
16638         } else if (HAS_PCH_SPLIT(dev_priv)) {
16639                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
16640                 dev_priv->display.get_initial_plane_config =
16641                         i9xx_get_initial_plane_config;
16642                 dev_priv->display.crtc_compute_clock =
16643                         ironlake_crtc_compute_clock;
16644                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
16645                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
16646         } else if (IS_CHERRYVIEW(dev_priv)) {
16647                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16648                 dev_priv->display.get_initial_plane_config =
16649                         i9xx_get_initial_plane_config;
16650                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
16651                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16652                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16653         } else if (IS_VALLEYVIEW(dev_priv)) {
16654                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16655                 dev_priv->display.get_initial_plane_config =
16656                         i9xx_get_initial_plane_config;
16657                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
16658                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16659                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16660         } else if (IS_G4X(dev_priv)) {
16661                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16662                 dev_priv->display.get_initial_plane_config =
16663                         i9xx_get_initial_plane_config;
16664                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
16665                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16666                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16667         } else if (IS_PINEVIEW(dev_priv)) {
16668                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16669                 dev_priv->display.get_initial_plane_config =
16670                         i9xx_get_initial_plane_config;
16671                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
16672                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16673                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16674         } else if (!IS_GEN(dev_priv, 2)) {
16675                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16676                 dev_priv->display.get_initial_plane_config =
16677                         i9xx_get_initial_plane_config;
16678                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
16679                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16680                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16681         } else {
16682                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16683                 dev_priv->display.get_initial_plane_config =
16684                         i9xx_get_initial_plane_config;
16685                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
16686                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16687                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16688         }
16689
16690         if (IS_GEN(dev_priv, 5)) {
16691                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
16692         } else if (IS_GEN(dev_priv, 6)) {
16693                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
16694         } else if (IS_IVYBRIDGE(dev_priv)) {
16695                 /* FIXME: detect B0+ stepping and use auto training */
16696                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
16697         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
16698                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
16699         }
16700
16701         if (INTEL_GEN(dev_priv) >= 9)
16702                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
16703         else
16704                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
16705
16706 }
16707
/*
 * Read the current CDCLK state from the hardware and adopt it as both
 * the logical and actual software state, so bookkeeping starts out in
 * sync with whatever the BIOS left programmed.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	intel_update_cdclk(i915);
	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
16714
16715 /*
16716  * Calculate what we think the watermarks should be for the state we've read
16717  * out of the hardware and then immediately program those watermarks so that
16718  * we ensure the hardware settings match our internal state.
16719  *
16720  * We can calculate what we think WM's should be by creating a duplicate of the
16721  * current state (which was constructed during hardware readout) and running it
16722  * through the atomic check code to calculate new watermark values in the
16723  * state object.
16724  */
16725 static void sanitize_watermarks(struct drm_device *dev)
16726 {
16727         struct drm_i915_private *dev_priv = to_i915(dev);
16728         struct drm_atomic_state *state;
16729         struct intel_atomic_state *intel_state;
16730         struct intel_crtc *crtc;
16731         struct intel_crtc_state *crtc_state;
16732         struct drm_modeset_acquire_ctx ctx;
16733         int ret;
16734         int i;
16735
16736         /* Only supported on platforms that use atomic watermark design */
16737         if (!dev_priv->display.optimize_watermarks)
16738                 return;
16739
16740         /*
16741          * We need to hold connection_mutex before calling duplicate_state so
16742          * that the connector loop is protected.
16743          */
16744         drm_modeset_acquire_init(&ctx, 0);
16745 retry:
16746         ret = drm_modeset_lock_all_ctx(dev, &ctx);
16747         if (ret == -EDEADLK) {
16748                 drm_modeset_backoff(&ctx);
16749                 goto retry;
16750         } else if (WARN_ON(ret)) {
16751                 goto fail;
16752         }
16753
16754         state = drm_atomic_helper_duplicate_state(dev, &ctx);
16755         if (WARN_ON(IS_ERR(state)))
16756                 goto fail;
16757
16758         intel_state = to_intel_atomic_state(state);
16759
16760         /*
16761          * Hardware readout is the only time we don't want to calculate
16762          * intermediate watermarks (since we don't trust the current
16763          * watermarks).
16764          */
16765         if (!HAS_GMCH(dev_priv))
16766                 intel_state->skip_intermediate_wm = true;
16767
16768         ret = intel_atomic_check(dev, state);
16769         if (ret) {
16770                 /*
16771                  * If we fail here, it means that the hardware appears to be
16772                  * programmed in a way that shouldn't be possible, given our
16773                  * understanding of watermark requirements.  This might mean a
16774                  * mistake in the hardware readout code or a mistake in the
16775                  * watermark calculations for a given platform.  Raise a WARN
16776                  * so that this is noticeable.
16777                  *
16778                  * If this actually happens, we'll have to just leave the
16779                  * BIOS-programmed watermarks untouched and hope for the best.
16780                  */
16781                 WARN(true, "Could not determine valid watermarks for inherited state\n");
16782                 goto put_state;
16783         }
16784
16785         /* Write calculated watermark values back */
16786         for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
16787                 crtc_state->wm.need_postvbl_update = true;
16788                 dev_priv->display.optimize_watermarks(intel_state, crtc);
16789
16790                 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
16791         }
16792
16793 put_state:
16794         drm_atomic_state_put(state);
16795 fail:
16796         drm_modeset_drop_locks(&ctx);
16797         drm_modeset_acquire_fini(&ctx);
16798 }
16799
16800 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16801 {
16802         if (IS_GEN(dev_priv, 5)) {
16803                 u32 fdi_pll_clk =
16804                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16805
16806                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16807         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16808                 dev_priv->fdi_pll_freq = 270000;
16809         } else {
16810                 return;
16811         }
16812
16813         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16814 }
16815
/*
 * Commit an atomic state touching all currently-active crtcs and their
 * planes, forcing plane state recomputation right after probe.
 * Returns 0 on success or a negative error code; -EDEADLK from lock
 * contention is handled internally via the clear/backoff/retry loop.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	/* Must be (re)set after every drm_atomic_state_clear(). */
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		/* Lock contention: reset the state and retry from scratch. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
16872
16873 static void intel_mode_config_init(struct drm_i915_private *i915)
16874 {
16875         struct drm_mode_config *mode_config = &i915->drm.mode_config;
16876
16877         drm_mode_config_init(&i915->drm);
16878
16879         mode_config->min_width = 0;
16880         mode_config->min_height = 0;
16881
16882         mode_config->preferred_depth = 24;
16883         mode_config->prefer_shadow = 1;
16884
16885         mode_config->allow_fb_modifiers = true;
16886
16887         mode_config->funcs = &intel_mode_funcs;
16888
16889         /*
16890          * Maximum framebuffer dimensions, chosen to match
16891          * the maximum render engine surface size on gen4+.
16892          */
16893         if (INTEL_GEN(i915) >= 7) {
16894                 mode_config->max_width = 16384;
16895                 mode_config->max_height = 16384;
16896         } else if (INTEL_GEN(i915) >= 4) {
16897                 mode_config->max_width = 8192;
16898                 mode_config->max_height = 8192;
16899         } else if (IS_GEN(i915, 3)) {
16900                 mode_config->max_width = 4096;
16901                 mode_config->max_height = 4096;
16902         } else {
16903                 mode_config->max_width = 2048;
16904                 mode_config->max_height = 2048;
16905         }
16906
16907         if (IS_I845G(i915) || IS_I865G(i915)) {
16908                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
16909                 mode_config->cursor_height = 1023;
16910         } else if (IS_GEN(i915, 2)) {
16911                 mode_config->cursor_width = 64;
16912                 mode_config->cursor_height = 64;
16913         } else {
16914                 mode_config->cursor_width = 256;
16915                 mode_config->cursor_height = 256;
16916         }
16917 }
16918
/*
 * One-time display/modeset initialization at driver load: sets up the
 * workqueues, mode_config, PM/FBC/quirks, creates crtcs and outputs,
 * reads out the BIOS hardware state, reserves the BIOS framebuffer,
 * sanitizes watermarks and performs the initial atomic commit.
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	/* NOTE(review): the alloc_*workqueue() results are not checked for
	 * NULL here — verify downstream users tolerate that on OOM. */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	/* NOTE(review): on this error path the workqueues above are not
	 * destroyed — presumably cleaned up by driver teardown; confirm. */
	ret = intel_bw_init(i915);
	if (ret)
		return ret;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_NUM_PIPES(i915),
		      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create a crtc per pipe, but only when the display is usable. */
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				drm_mode_config_cleanup(dev);
				return ret;
			}
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Take over the hardware state the BIOS left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		/* Deliberately non-fatal: init continues with a warning. */
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	return 0;
}
17025
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 timing
 * (quirk handling: the hardware wants this pipe running).
 * Programs the DPLL dividers, pipe timings, and PIPECONF directly.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the chosen dividers produce the expected dotclock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Fixed 640x480 timings (registers take value-1 encodings). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
17095
/*
 * Disable a pipe that was force-enabled by the i830 quirk: all planes
 * and cursors must already be off (WARNed on below), then PIPECONF is
 * cleared and the DPLL shut down once the scanline has stopped.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* Nothing should still be scanning out of this pipe. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	/* Keep VGA mode disabled while turning the VCO off. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
17117
17118 static void
17119 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17120 {
17121         struct intel_crtc *crtc;
17122
17123         if (INTEL_GEN(dev_priv) >= 4)
17124                 return;
17125
17126         for_each_intel_crtc(&dev_priv->drm, crtc) {
17127                 struct intel_plane *plane =
17128                         to_intel_plane(crtc->base.primary);
17129                 struct intel_crtc *plane_crtc;
17130                 enum pipe pipe;
17131
17132                 if (!plane->get_hw_state(plane, &pipe))
17133                         continue;
17134
17135                 if (pipe == crtc->pipe)
17136                         continue;
17137
17138                 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17139                               plane->base.base.id, plane->base.name);
17140
17141                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17142                 intel_plane_disable_noatomic(plane_crtc, plane);
17143         }
17144 }
17145
17146 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17147 {
17148         struct drm_device *dev = crtc->base.dev;
17149         struct intel_encoder *encoder;
17150
17151         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
17152                 return true;
17153
17154         return false;
17155 }
17156
17157 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17158 {
17159         struct drm_device *dev = encoder->base.dev;
17160         struct intel_connector *connector;
17161
17162         for_each_connector_on_encoder(dev, &encoder->base, connector)
17163                 return connector;
17164
17165         return NULL;
17166 }
17167
17168 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17169                               enum pipe pch_transcoder)
17170 {
17171         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17172                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17173 }
17174
/*
 * Clear any frame start delay the BIOS may have left programmed (used
 * for debugging) on both the CPU transcoder and, if present, the PCH
 * transcoder, forcing the delay back to 0 via read-modify-write.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: delay lives in the per-transcoder chicken register. */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = I915_READ(reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	} else {
		/* Older platforms: delay is a PIPECONF field. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	} else {
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}
}
17225
/*
 * Sanitize the state of a single crtc after hardware readout: kill off
 * BIOS leftovers (frame start delays, non-primary planes, background
 * color), disable the pipe if it has no encoders, and set up FIFO
 * underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
17292
17293 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17294 {
17295         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17296
17297         /*
17298          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17299          * the hardware when a high res displays plugged in. DPLL P
17300          * divider is zero, and the pipe timings are bonkers. We'll
17301          * try to disable everything in that case.
17302          *
17303          * FIXME would be nice to be able to sanitize this state
17304          * without several WARNs, but for now let's take the easy
17305          * road.
17306          */
17307         return IS_GEN(dev_priv, 6) &&
17308                 crtc_state->hw.active &&
17309                 crtc_state->shared_dpll &&
17310                 crtc_state->port_clock == 0;
17311 }
17312
/*
 * Sanitize a single encoder after hardware readout: if the encoder has
 * active connectors but no active pipe (e.g. fallout from resume
 * register restore, or a bogus SNB DPLL config), manually run its
 * disable hooks and clamp the connector/encoder linkage to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a misprogrammed-by-BIOS pipe (see has_bogus_dpll_config())
	 * as inactive so the encoder gets disabled below. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* restore the saved best_encoder pointer */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
17379
/* FIXME read out full plane state for all planes */
/*
 * Read back each plane's hardware state (visibility and the pipe it is
 * attached to), propagate it into the software plane/crtc state, then
 * fix up every crtc's active planes bookkeeping to match.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A;
		bool visible;

		/* get_hw_state() also reports which pipe the plane is on */
		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	/* Now that all plane visibility is known, fix up each crtc. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
17412
/*
 * Read out the current hardware modeset state (pipes, planes, shared
 * DPLLs, encoders and connectors) and rebuild the matching software
 * state from scratch, so the atomic core sees whatever the hardware
 * (e.g. the BIOS at boot) actually left enabled.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Throw away any stale software state and start from zero. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		memset(crtc_state, 0, sizeof(*crtc_state));
		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->uapi);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			dev_priv->active_pipes |= BIT(crtc->pipe);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->hw.active));
	}

	readout_plane_state(dev_priv);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);

		/* EHL DPLL4 needs a DC-off power reference while enabled */
		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		/* Rebuild the crtc -> pll usage mask from the readout. */
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->hw.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			/* Link the encoder to the crtc it is driving. */
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive the remaining per-crtc state from what was read out. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
				      plane->base.base.id, plane->base.name,
				      crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (WARN_ON(min_cdclk < 0))
				min_cdclk = 0;
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
17613
17614 static void
17615 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17616 {
17617         struct intel_encoder *encoder;
17618
17619         for_each_intel_encoder(&dev_priv->drm, encoder) {
17620                 struct intel_crtc_state *crtc_state;
17621
17622                 if (!encoder->get_power_domains)
17623                         continue;
17624
17625                 /*
17626                  * MST-primary and inactive encoders don't have a crtc state
17627                  * and neither of these require any power domain references.
17628                  */
17629                 if (!encoder->base.crtc)
17630                         continue;
17631
17632                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17633                 encoder->get_power_domains(encoder, crtc_state);
17634         }
17635 }
17636
/*
 * Apply display workarounds that must be in place before we start
 * touching the rest of the display hardware state.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
17653
17654 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17655                                        enum port port, i915_reg_t hdmi_reg)
17656 {
17657         u32 val = I915_READ(hdmi_reg);
17658
17659         if (val & SDVO_ENABLE ||
17660             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17661                 return;
17662
17663         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
17664                       port_name(port));
17665
17666         val &= ~SDVO_PIPE_SEL_MASK;
17667         val |= SDVO_PIPE_SEL(PIPE_A);
17668
17669         I915_WRITE(hdmi_reg, val);
17670 }
17671
17672 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17673                                      enum port port, i915_reg_t dp_reg)
17674 {
17675         u32 val = I915_READ(dp_reg);
17676
17677         if (val & DP_PORT_EN ||
17678             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17679                 return;
17680
17681         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
17682                       port_name(port));
17683
17684         val &= ~DP_PIPE_SEL_MASK;
17685         val |= DP_PIPE_SEL(PIPE_A);
17686
17687         I915_WRITE(dp_reg, val);
17688 }
17689
/* Sanitize the transcoder select bits on all IBX PCH DP/HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
17712
17713 /* Scan out the current hw modeset state,
17714  * and sanitizes it to the current state
17715  */
17716 static void
17717 intel_modeset_setup_hw_state(struct drm_device *dev,
17718                              struct drm_modeset_acquire_ctx *ctx)
17719 {
17720         struct drm_i915_private *dev_priv = to_i915(dev);
17721         struct intel_encoder *encoder;
17722         struct intel_crtc *crtc;
17723         intel_wakeref_t wakeref;
17724         int i;
17725
17726         wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
17727
17728         intel_early_display_was(dev_priv);
17729         intel_modeset_readout_hw_state(dev);
17730
17731         /* HW state is read out, now we need to sanitize this mess. */
17732
17733         /* Sanitize the TypeC port mode upfront, encoders depend on this */
17734         for_each_intel_encoder(dev, encoder) {
17735                 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
17736
17737                 /* We need to sanitize only the MST primary port. */
17738                 if (encoder->type != INTEL_OUTPUT_DP_MST &&
17739                     intel_phy_is_tc(dev_priv, phy))
17740                         intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
17741         }
17742
17743         get_encoder_power_domains(dev_priv);
17744
17745         if (HAS_PCH_IBX(dev_priv))
17746                 ibx_sanitize_pch_ports(dev_priv);
17747
17748         /*
17749          * intel_sanitize_plane_mapping() may need to do vblank
17750          * waits, so we need vblank interrupts restored beforehand.
17751          */
17752         for_each_intel_crtc(&dev_priv->drm, crtc) {
17753                 struct intel_crtc_state *crtc_state =
17754                         to_intel_crtc_state(crtc->base.state);
17755
17756                 drm_crtc_vblank_reset(&crtc->base);
17757
17758                 if (crtc_state->hw.active)
17759                         intel_crtc_vblank_on(crtc_state);
17760         }
17761
17762         intel_sanitize_plane_mapping(dev_priv);
17763
17764         for_each_intel_encoder(dev, encoder)
17765                 intel_sanitize_encoder(encoder);
17766
17767         for_each_intel_crtc(&dev_priv->drm, crtc) {
17768                 struct intel_crtc_state *crtc_state =
17769                         crtc_state = to_intel_crtc_state(crtc->base.state);
17770
17771                 intel_sanitize_crtc(crtc, ctx);
17772                 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
17773         }
17774
17775         intel_modeset_update_connector_atomic_state(dev);
17776
17777         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
17778                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
17779
17780                 if (!pll->on || pll->active_mask)
17781                         continue;
17782
17783                 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
17784                               pll->info->name);
17785
17786                 pll->info->funcs->disable(dev_priv, pll);
17787                 pll->on = false;
17788         }
17789
17790         if (IS_G4X(dev_priv)) {
17791                 g4x_wm_get_hw_state(dev_priv);
17792                 g4x_wm_sanitize(dev_priv);
17793         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
17794                 vlv_wm_get_hw_state(dev_priv);
17795                 vlv_wm_sanitize(dev_priv);
17796         } else if (INTEL_GEN(dev_priv) >= 9) {
17797                 skl_wm_get_hw_state(dev_priv);
17798         } else if (HAS_PCH_SPLIT(dev_priv)) {
17799                 ilk_wm_get_hw_state(dev_priv);
17800         }
17801
17802         for_each_intel_crtc(dev, crtc) {
17803                 struct intel_crtc_state *crtc_state =
17804                         to_intel_crtc_state(crtc->base.state);
17805                 u64 put_domains;
17806
17807                 put_domains = modeset_get_crtc_power_domains(crtc_state);
17808                 if (WARN_ON(put_domains))
17809                         modeset_put_power_domains(dev_priv, put_domains);
17810         }
17811
17812         intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
17813
17814         intel_fbc_init_pipe_state(dev_priv);
17815 }
17816
17817 void intel_display_resume(struct drm_device *dev)
17818 {
17819         struct drm_i915_private *dev_priv = to_i915(dev);
17820         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
17821         struct drm_modeset_acquire_ctx ctx;
17822         int ret;
17823
17824         dev_priv->modeset_restore_state = NULL;
17825         if (state)
17826                 state->acquire_ctx = &ctx;
17827
17828         drm_modeset_acquire_init(&ctx, 0);
17829
17830         while (1) {
17831                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
17832                 if (ret != -EDEADLK)
17833                         break;
17834
17835                 drm_modeset_backoff(&ctx);
17836         }
17837
17838         if (!ret)
17839                 ret = __intel_display_resume(dev, state, &ctx);
17840
17841         intel_enable_ipc(dev_priv);
17842         drm_modeset_drop_locks(&ctx);
17843         drm_modeset_acquire_fini(&ctx);
17844
17845         if (ret)
17846                 DRM_ERROR("Restoring old state failed with %i\n", ret);
17847         if (state)
17848                 drm_atomic_state_put(state);
17849 }
17850
/*
 * Cancel all per-connector work that may have been queued by hotplug
 * handling (modeset retry work, HDCP check/property work). Called from
 * driver teardown after hpd handling has been shut down, so nothing
 * can re-queue these.
 */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Only connectors that initialized the work have a func set */
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
17868
/*
 * Tear down all modeset/display state. The ordering below is delicate:
 * pending work is flushed first, then interrupts and polling are shut
 * down before anything that could race with them, and fbdev/MST are
 * finalized before drm_mode_config_cleanup() destroys the objects they
 * reference.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Let in-flight flips and modesets finish before tearing down. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* All users of the workqueues are gone now; safe to destroy. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
17920
17921 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
17922
/*
 * Snapshot of display register state captured at GPU error time.
 * Filled in by intel_display_capture_error_state() and dumped into the
 * error state file; all fields are raw register values unless noted.
 */
struct intel_display_error_state {

	u32 power_well_driver;

	/* Per-pipe cursor plane registers */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false if the pipe's power domain was off at capture time */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	/* Per-transcoder timing registers (A-D plus EDP; see the
	 * transcoders[] table in intel_display_capture_error_state()) */
	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
17965
17966 struct intel_display_error_state *
17967 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
17968 {
17969         struct intel_display_error_state *error;
17970         int transcoders[] = {
17971                 TRANSCODER_A,
17972                 TRANSCODER_B,
17973                 TRANSCODER_C,
17974                 TRANSCODER_D,
17975                 TRANSCODER_EDP,
17976         };
17977         int i;
17978
17979         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
17980
17981         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
17982                 return NULL;
17983
17984         error = kzalloc(sizeof(*error), GFP_ATOMIC);
17985         if (error == NULL)
17986                 return NULL;
17987
17988         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17989                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
17990
17991         for_each_pipe(dev_priv, i) {
17992                 error->pipe[i].power_domain_on =
17993                         __intel_display_power_is_enabled(dev_priv,
17994                                                          POWER_DOMAIN_PIPE(i));
17995                 if (!error->pipe[i].power_domain_on)
17996                         continue;
17997
17998                 error->cursor[i].control = I915_READ(CURCNTR(i));
17999                 error->cursor[i].position = I915_READ(CURPOS(i));
18000                 error->cursor[i].base = I915_READ(CURBASE(i));
18001
18002                 error->plane[i].control = I915_READ(DSPCNTR(i));
18003                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
18004                 if (INTEL_GEN(dev_priv) <= 3) {
18005                         error->plane[i].size = I915_READ(DSPSIZE(i));
18006                         error->plane[i].pos = I915_READ(DSPPOS(i));
18007                 }
18008                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18009                         error->plane[i].addr = I915_READ(DSPADDR(i));
18010                 if (INTEL_GEN(dev_priv) >= 4) {
18011                         error->plane[i].surface = I915_READ(DSPSURF(i));
18012                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
18013                 }
18014
18015                 error->pipe[i].source = I915_READ(PIPESRC(i));
18016
18017                 if (HAS_GMCH(dev_priv))
18018                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
18019         }
18020
18021         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18022                 enum transcoder cpu_transcoder = transcoders[i];
18023
18024                 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
18025                         continue;
18026
18027                 error->transcoder[i].available = true;
18028                 error->transcoder[i].power_domain_on =
18029                         __intel_display_power_is_enabled(dev_priv,
18030                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
18031                 if (!error->transcoder[i].power_domain_on)
18032                         continue;
18033
18034                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
18035
18036                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
18037                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
18038                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
18039                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
18040                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
18041                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
18042                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
18043         }
18044
18045         return error;
18046 }
18047
/* Shorthand for writing into the error-state dump buffer */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18049
18050 void
18051 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
18052                                 struct intel_display_error_state *error)
18053 {
18054         struct drm_i915_private *dev_priv = m->i915;
18055         int i;
18056
18057         if (!error)
18058                 return;
18059
18060         err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
18061         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18062                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
18063                            error->power_well_driver);
18064         for_each_pipe(dev_priv, i) {
18065                 err_printf(m, "Pipe [%d]:\n", i);
18066                 err_printf(m, "  Power: %s\n",
18067                            onoff(error->pipe[i].power_domain_on));
18068                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
18069                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
18070
18071                 err_printf(m, "Plane [%d]:\n", i);
18072                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
18073                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
18074                 if (INTEL_GEN(dev_priv) <= 3) {
18075                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
18076                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
18077                 }
18078                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18079                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
18080                 if (INTEL_GEN(dev_priv) >= 4) {
18081                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
18082                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
18083                 }
18084
18085                 err_printf(m, "Cursor [%d]:\n", i);
18086                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
18087                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
18088                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
18089         }
18090
18091         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18092                 if (!error->transcoder[i].available)
18093                         continue;
18094
18095                 err_printf(m, "CPU transcoder: %s\n",
18096                            transcoder_name(error->transcoder[i].cpu_transcoder));
18097                 err_printf(m, "  Power: %s\n",
18098                            onoff(error->transcoder[i].power_domain_on));
18099                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
18100                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
18101                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
18102                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
18103                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
18104                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
18105                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
18106         }
18107 }
18108
18109 #endif