]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/i915/display/intel_display.c
56fac063f02539c7d22dc956b9b13fa7c4fc30c1
[linux.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "gt/intel_rps.h"
59
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
78 #include "intel_pm.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
83 #include "intel_tc.h"
84 #include "intel_vga.h"
85
/*
 * Primary plane formats for gen <= 3: 8bpp palette plus 16/32bpp
 * RGB, no alpha or deep-color variants.
 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};
93
/*
 * Primary plane formats for ivb (no fp16 due to hw issue);
 * otherwise the same as the gen >= 4 list below.
 */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};
103
/* Primary plane formats for gen >= 4, except ivb; adds fp16 (XBGR16161616F) */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};
114
/*
 * Primary plane formats for vlv/chv; unlike the older platforms this
 * list also includes the per-pixel alpha (ARGB/ABGR) variants.
 */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};
129
/*
 * Framebuffer modifiers supported on i9xx-style planes; the list is
 * terminated by DRM_FORMAT_MOD_INVALID.
 */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
135
/* Cursor formats: ARGB8888 only */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
140
/* Cursor planes take only linear surfaces; INVALID terminates the list. */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
145
/* Forward declarations for static helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
175
/*
 * Legal min/max ranges for the DPLL divisors, plus the dot clock
 * threshold (dot_limit) used to pick between the slow and fast
 * post divider (p2) values.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
186
187 /* returns HPLL frequency in kHz */
188 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
189 {
190         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
191
192         /* Obtain SKU information */
193         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
194                 CCK_FUSE_HPLL_FREQ_MASK;
195
196         return vco_freq[hpll_freq] * 1000;
197 }
198
199 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
200                       const char *name, u32 reg, int ref_freq)
201 {
202         u32 val;
203         int divider;
204
205         val = vlv_cck_read(dev_priv, reg);
206         divider = val & CCK_FREQUENCY_VALUES;
207
208         WARN((val & CCK_FREQUENCY_STATUS) !=
209              (divider << CCK_FREQUENCY_STATUS_SHIFT),
210              "%s change in progress\n", name);
211
212         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
213 }
214
215 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
216                            const char *name, u32 reg)
217 {
218         int hpll;
219
220         vlv_cck_get(dev_priv);
221
222         if (dev_priv->hpll_freq == 0)
223                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
224
225         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
226
227         vlv_cck_put(dev_priv);
228
229         return hpll;
230 }
231
232 static void intel_update_czclk(struct drm_i915_private *dev_priv)
233 {
234         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
235                 return;
236
237         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
238                                                       CCK_CZ_CLOCK_CONTROL);
239
240         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
241 }
242
243 static inline u32 /* units of 100MHz */
244 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
245                     const struct intel_crtc_state *pipe_config)
246 {
247         if (HAS_DDI(dev_priv))
248                 return pipe_config->port_clock; /* SPLL */
249         else
250                 return dev_priv->fdi_pll_freq;
251 }
252
/* DPLL divisor limits for the i8xx DAC output path; clocks in kHz. */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};
265
/* DPLL divisor limits for i8xx DVO; differs from the DAC limits only in p2_fast. */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};
278
/* DPLL divisor limits for i8xx LVDS; clocks in kHz. */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
291
/* DPLL divisor limits for i9xx SDVO; clocks in kHz. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
304
/* DPLL divisor limits for i9xx LVDS; clocks in kHz. */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
317
318
/* DPLL divisor limits for g4x SDVO; clocks in kHz. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};
333
/* DPLL divisor limits for g4x HDMI; clocks in kHz. */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};
346
/*
 * DPLL divisor limits for g4x single-channel LVDS. dot_limit is 0 so p2
 * is effectively fixed (slow == fast).
 */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};
360
/*
 * DPLL divisor limits for g4x dual-channel LVDS. dot_limit is 0 so p2
 * is effectively fixed (slow == fast).
 */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
374
/* DPLL divisor limits for Pineview SDVO; clocks in kHz. */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};
389
/* DPLL divisor limits for Pineview LVDS; same m1/m2 quirk as the SDVO table above. */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
402
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};
420
/* Ironlake single-channel LVDS limits (see the register bias note above). */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
433
/* Ironlake dual-channel LVDS limits. */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
446
/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};
460
/* Ironlake dual-channel LVDS limits with a 100MHz refclk. */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
473
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
489
static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored with 22 fractional bits (cf. chv_calc_dpll_params) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
505
/* Broxton DPLL limits; m2 uses the same 22-bit fractional encoding as chv. */
static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
517
518 /* WA Display #0827: Gen9:all */
519 static void
520 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
521 {
522         if (enable)
523                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
524                            I915_READ(CLKGATE_DIS_PSL(pipe)) |
525                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
526         else
527                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
528                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
529                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
530 }
531
532 /* Wa_2006604312:icl */
533 static void
534 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
535                        bool enable)
536 {
537         if (enable)
538                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
539                            I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
540         else
541                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
542                            I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
543 }
544
/* Thin wrapper: does this crtc state require a full modeset? */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}
550
551 bool
552 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
553 {
554         return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
555                 crtc_state->sync_mode_slaves_mask);
556 }
557
558 static bool
559 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
560 {
561         return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
562                 crtc_state->sync_mode_slaves_mask);
563 }
564
565 /*
566  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
567  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
568  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
569  * The helpers' return value is the rate of the clock that is fed to the
570  * display engine's pipe which can be the above fast dot clock rate or a
571  * divided-down version of it.
572  */
573 /* m1 is reserved as 0 in Pineview, n is a ring counter */
574 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
575 {
576         clock->m = clock->m2 + 2;
577         clock->p = clock->p1 * clock->p2;
578         if (WARN_ON(clock->n == 0 || clock->p == 0))
579                 return 0;
580         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
581         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
582
583         return clock->dot;
584 }
585
586 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
587 {
588         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
589 }
590
591 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
592 {
593         clock->m = i9xx_dpll_compute_m(clock);
594         clock->p = clock->p1 * clock->p2;
595         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
596                 return 0;
597         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
598         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
599
600         return clock->dot;
601 }
602
603 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
604 {
605         clock->m = clock->m1 * clock->m2;
606         clock->p = clock->p1 * clock->p2;
607         if (WARN_ON(clock->n == 0 || clock->p == 0))
608                 return 0;
609         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
610         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
611
612         return clock->dot / 5;
613 }
614
615 int chv_calc_dpll_params(int refclk, struct dpll *clock)
616 {
617         clock->m = clock->m1 * clock->m2;
618         clock->p = clock->p1 * clock->p2;
619         if (WARN_ON(clock->n == 0 || clock->p == 0))
620                 return 0;
621         clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
622                                            clock->n << 22);
623         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
624
625         return clock->dot / 5;
626 }
627
/*
 * Reject the candidate divisors: expands to a "return false" in the
 * caller (intel_PLL_is_valid). The debug print is compiled out; the
 * do/while(0) keeps the macro usable as a single statement.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
629
630 /*
631  * Returns whether the given set of divisors are valid for a given refclk with
632  * the given connectors.
633  */
634 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
635                                const struct intel_limit *limit,
636                                const struct dpll *clock)
637 {
638         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
639                 INTELPllInvalid("n out of range\n");
640         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
641                 INTELPllInvalid("p1 out of range\n");
642         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
643                 INTELPllInvalid("m2 out of range\n");
644         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
645                 INTELPllInvalid("m1 out of range\n");
646
647         if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
648             !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
649                 if (clock->m1 <= clock->m2)
650                         INTELPllInvalid("m1 <= m2\n");
651
652         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
653             !IS_GEN9_LP(dev_priv)) {
654                 if (clock->p < limit->p.min || limit->p.max < clock->p)
655                         INTELPllInvalid("p out of range\n");
656                 if (clock->m < limit->m.min || limit->m.max < clock->m)
657                         INTELPllInvalid("m out of range\n");
658         }
659
660         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
661                 INTELPllInvalid("vco out of range\n");
662         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
663          * connector, etc., rather than just a single range.
664          */
665         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
666                 INTELPllInvalid("dot out of range\n");
667
668         return true;
669 }
670
671 static int
672 i9xx_select_p2_div(const struct intel_limit *limit,
673                    const struct intel_crtc_state *crtc_state,
674                    int target)
675 {
676         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
677
678         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
679                 /*
680                  * For LVDS just rely on its current settings for dual-channel.
681                  * We haven't figured out how to reliably set up different
682                  * single/dual channel state, if we even can.
683                  */
684                 if (intel_is_dual_link_lvds(dev_priv))
685                         return limit->p2.p2_fast;
686                 else
687                         return limit->p2.p2_slow;
688         } else {
689                 if (target < limit->p2.dot_limit)
690                         return limit->p2.p2_slow;
691                 else
692                         return limit->p2.p2_fast;
693         }
694 }
695
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;	/* best error so far; seeded with the worst case */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Exhaustively walk the divisor space, keeping the valid candidate
	 * whose dot clock lands closest to the target.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must stay strictly above m2 on these platforms */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking: P must match */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* Any improvement over the seed means a valid set was found. */
	return (err != target);
}
753
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;	/* best error so far; seeded with the worst case */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive search as i9xx_find_best_dpll(), but without
	 * the m1 > m2 constraint (Pineview has a single combined m
	 * divider) and using the Pineview clock equation.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking: P must match */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* Any improvement over the seed means a valid set was found. */
	return (err != target);
}
809
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 * (NOTE: this variant does not actually consult @match_clock.)
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Clamp the n search range so only
						 * a smaller (preferred) n can
						 * displace this result.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
868
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Returns true if it is, storing
 * the calculated error (in ppm of the target frequency) in @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	/* deviation from the target, in parts per million */
	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* otherwise require a meaningful (>10 ppm) improvement over the best */
	return *error_ppm + 10 < best_error_ppm;
}
908
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 * (NOTE: @match_clock is not used by this variant.)
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 from the divisors chosen so far */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
968
969 /*
970  * Returns a set of divisors for the desired target clock with the given
971  * refclk, or FALSE.  The returned values represent the clock equation:
972  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
973  */
974 static bool
975 chv_find_best_dpll(const struct intel_limit *limit,
976                    struct intel_crtc_state *crtc_state,
977                    int target, int refclk, struct dpll *match_clock,
978                    struct dpll *best_clock)
979 {
980         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
981         struct drm_device *dev = crtc->base.dev;
982         unsigned int best_error_ppm;
983         struct dpll clock;
984         u64 m2;
985         int found = false;
986
987         memset(best_clock, 0, sizeof(*best_clock));
988         best_error_ppm = 1000000;
989
990         /*
991          * Based on hardware doc, the n always set to 1, and m1 always
992          * set to 2.  If requires to support 200Mhz refclk, we need to
993          * revisit this because n may not 1 anymore.
994          */
995         clock.n = 1, clock.m1 = 2;
996         target *= 5;    /* fast clock */
997
998         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
999                 for (clock.p2 = limit->p2.p2_fast;
1000                                 clock.p2 >= limit->p2.p2_slow;
1001                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1002                         unsigned int error_ppm;
1003
1004                         clock.p = clock.p1 * clock.p2;
1005
1006                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1007                                                    refclk * clock.m1);
1008
1009                         if (m2 > INT_MAX/clock.m1)
1010                                 continue;
1011
1012                         clock.m2 = m2;
1013
1014                         chv_calc_dpll_params(refclk, &clock);
1015
1016                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1017                                 continue;
1018
1019                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1020                                                 best_error_ppm, &error_ppm))
1021                                 continue;
1022
1023                         *best_clock = clock;
1024                         best_error_ppm = error_ppm;
1025                         found = true;
1026                 }
1027         }
1028
1029         return found;
1030 }
1031
1032 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1033                         struct dpll *best_clock)
1034 {
1035         int refclk = 100000;
1036         const struct intel_limit *limit = &intel_limits_bxt;
1037
1038         return chv_find_best_dpll(limit, crtc_state,
1039                                   crtc_state->port_clock, refclk,
1040                                   NULL, best_clock);
1041 }
1042
/* Report whether @crtc is active with a valid clock and framebuffer. */
bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}
1061
1062 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1063                                              enum pipe pipe)
1064 {
1065         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1066
1067         return crtc->config->cpu_transcoder;
1068 }
1069
1070 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1071                                     enum pipe pipe)
1072 {
1073         i915_reg_t reg = PIPEDSL(pipe);
1074         u32 line1, line2;
1075         u32 line_mask;
1076
1077         if (IS_GEN(dev_priv, 2))
1078                 line_mask = DSL_LINEMASK_GEN2;
1079         else
1080                 line_mask = DSL_LINEMASK_GEN3;
1081
1082         line1 = I915_READ(reg) & line_mask;
1083         msleep(5);
1084         line2 = I915_READ(reg) & line_mask;
1085
1086         return line1 != line2;
1087 }
1088
/*
 * Wait up to 100 ms for the pipe scanline to be moving (@state == true)
 * or stopped (@state == false), logging an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1099
/* Wait for the pipe's scanline counter to stop advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1104
/* Wait for the pipe's scanline counter to start advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1109
/*
 * Wait for a just-disabled pipe to actually turn off: on gen4+ poll the
 * PIPECONF state bit, on older gens watch the scanline counter stop.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* no pipe state bit here; fall back to the scanline counter */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1128
1129 /* Only for pre-ILK configs */
1130 void assert_pll(struct drm_i915_private *dev_priv,
1131                 enum pipe pipe, bool state)
1132 {
1133         u32 val;
1134         bool cur_state;
1135
1136         val = I915_READ(DPLL(pipe));
1137         cur_state = !!(val & DPLL_VCO_ENABLE);
1138         I915_STATE_WARN(cur_state != state,
1139              "PLL state assertion failure (expected %s, current %s)\n",
1140                         onoff(state), onoff(cur_state));
1141 }
1142
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* the DSI PLL control register lives behind the CCK sideband */
	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1158
/* Assert that the FDI TX for @pipe matches the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1180
1181 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1182                           enum pipe pipe, bool state)
1183 {
1184         u32 val;
1185         bool cur_state;
1186
1187         val = I915_READ(FDI_RX_CTL(pipe));
1188         cur_state = !!(val & FDI_RX_ENABLE);
1189         I915_STATE_WARN(cur_state != state,
1190              "FDI RX state assertion failure (expected %s, current %s)\n",
1191                         onoff(state), onoff(cur_state));
1192 }
1193 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1194 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1195
1196 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1197                                       enum pipe pipe)
1198 {
1199         u32 val;
1200
1201         /* ILK FDI PLL is always enabled */
1202         if (IS_GEN(dev_priv, 5))
1203                 return;
1204
1205         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1206         if (HAS_DDI(dev_priv))
1207                 return;
1208
1209         val = I915_READ(FDI_TX_CTL(pipe));
1210         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1211 }
1212
1213 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1214                        enum pipe pipe, bool state)
1215 {
1216         u32 val;
1217         bool cur_state;
1218
1219         val = I915_READ(FDI_RX_CTL(pipe));
1220         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1221         I915_STATE_WARN(cur_state != state,
1222              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1223                         onoff(state), onoff(cur_state));
1224 }
1225
/*
 * Assert that the panel power sequencer registers controlling @pipe are
 * unlocked (or that the panel is powered off entirely).
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* not applicable on DDI platforms */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* map the port selected by the PP to the pipe driving it */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* on these platforms the panel is expected on LVDS only */
		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	/* either unlocked registers or panel power off counts as unlocked */
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1282
/* Assert that @pipe is enabled/disabled as expected (@state). */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/* only touch PIPECONF if the transcoder's power domain is up */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* power domain off: register unreadable, treat as disabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1311
1312 static void assert_plane(struct intel_plane *plane, bool state)
1313 {
1314         enum pipe pipe;
1315         bool cur_state;
1316
1317         cur_state = plane->get_hw_state(plane, &pipe);
1318
1319         I915_STATE_WARN(cur_state != state,
1320                         "%s assertion failure (expected %s, current %s)\n",
1321                         plane->base.name, onoff(state), onoff(cur_state));
1322 }
1323
1324 #define assert_plane_enabled(p) assert_plane(p, true)
1325 #define assert_plane_disabled(p) assert_plane(p, false)
1326
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1335
/* Assert that vblank interrupts for @crtc are disabled. */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	/*
	 * drm_crtc_vblank_get() returning 0 means a vblank reference was
	 * successfully taken, i.e. vblanks are still enabled - warn, and
	 * drop the reference we just acquired to stay balanced.
	 */
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1341
1342 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1343                                     enum pipe pipe)
1344 {
1345         u32 val;
1346         bool enabled;
1347
1348         val = I915_READ(PCH_TRANSCONF(pipe));
1349         enabled = !!(val & TRANS_ENABLE);
1350         I915_STATE_WARN(enabled,
1351              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1352              pipe_name(pipe));
1353 }
1354
/* Assert that the PCH DP port @port is not driving @pipe's transcoder. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* on IBX a disabled port must not be left selecting transcoder B */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1372
/* Assert that the PCH HDMI port @port is not driving @pipe's transcoder. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* on IBX a disabled port must not be left selecting transcoder B */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1390
/* Assert that no PCH port (DP, VGA, LVDS, HDMI/SDVO) is driving @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1415
/* Write the VLV DPLL control register and wait for the PLL to lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1429
/* Enable the DPLL for @crtc on VLV according to @pipe_config. */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* only power up the VCO when the state actually wants it enabled */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1447
1448
/*
 * Program and lock the CHV DPLL: ungate the 10bit clock over DPIO
 * sideband, then enable the PLL and wait for it to lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1478
/* Enable the DPLL for @crtc on CHV according to @pipe_config. */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* only power up the VCO when the state actually wants it enabled */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* remember the value for later hw state cross-checking */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1515
1516 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1517 {
1518         if (IS_I830(dev_priv))
1519                 return false;
1520
1521         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1522 }
1523
/* Enable and warm up the i9xx-style DPLL for @crtc. */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* gen4+ has a dedicated register for the pixel multiplier */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1569
/*
 * Disable the DPLL feeding the crtc's pipe, leaving only the VGA
 * mode disable bit set. The pipe itself must already be off.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1586
/*
 * Disable the VLV DPLL for @pipe while keeping the reference clock
 * running; pipes other than A additionally keep the integrated CRI
 * clock bit set.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1602
/*
 * Disable the CHV DPLL for @pipe: turn off the PLL itself (keeping
 * the SSC reference clock and, for pipes other than A, the CRI
 * clock running), then disable the 10-bit clock to the display
 * controller via the DPIO sideband.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1628
/*
 * Wait (up to 1 ms) for the PHY "port ready" status bits of
 * @dport to match @expected_mask, warning on timeout. Ports B/C
 * report through DPLL(0) (port C shifted by 4), port D through
 * DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C status bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1660
/*
 * Enable the PCH transcoder for the crtc's pipe (IBX/CPT). The
 * shared DPLL and both FDI TX/RX must already be enabled; BPC,
 * frame start delay and interlace mode are programmed to match
 * the CPU PIPECONF before the transcoder is switched on.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		/* IBX uses a distinct interlace mode for SDVO outputs */
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1726
/*
 * Enable the single LPT PCH transcoder (wired as pipe A). FDI must
 * already be running; the interlace mode is copied from the CPU
 * transcoder's PIPECONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1758
/*
 * Disable the PCH transcoder for @pipe and wait for it to stop.
 * FDI and all PCH ports on the pipe must already be disabled. On
 * CPT the timing override workaround bit is cleared afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1788
/*
 * Disable the LPT PCH transcoder, wait for it to stop, and clear
 * the timing override workaround bit set at enable time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1806
1807 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1808 {
1809         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1810
1811         if (HAS_PCH_LPT(dev_priv))
1812                 return PIPE_A;
1813         else
1814                 return crtc->pipe;
1815 }
1816
1817 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1818 {
1819         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1820
1821         /*
1822          * On i965gm the hardware frame counter reads
1823          * zero when the TV encoder is enabled :(
1824          */
1825         if (IS_I965GM(dev_priv) &&
1826             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1827                 return 0;
1828
1829         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1830                 return 0xffffffff; /* full 32 bit counter */
1831         else if (INTEL_GEN(dev_priv) >= 3)
1832                 return 0xffffff; /* only 24 bits of frame count */
1833         else
1834                 return 0; /* Gen2 doesn't have a hardware frame counter */
1835 }
1836
1837 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1838 {
1839         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1840
1841         drm_crtc_set_max_vblank_count(&crtc->base,
1842                                       intel_crtc_max_vblank_count(crtc_state));
1843         drm_crtc_vblank_on(&crtc->base);
1844 }
1845
/*
 * Enable the pipe for @new_crtc_state. Planes must be disabled;
 * the feeding PLL (pipe PLL / DSI PLL on GMCH, FDI PLLs for PCH
 * encoders) must already be running. When there is no usable
 * hardware frame counter we also wait for the scanline to start
 * moving so later vblank timestamps are sane.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1903
/*
 * Disable the pipe for @old_crtc_state and wait for it to stop.
 * Planes must be disabled first. On i830 the pipe is deliberately
 * left enabled; only the double wide bit is cleared there.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* only wait for the pipe to stop if we actually disabled it */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1943
/* GTT tile size in bytes: 2 KiB on gen2, 4 KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1948
/*
 * Width of one tile row in bytes for the given fb color plane,
 * based on the fb's tiling modifier (linear buffers count as one
 * tile-sized row). CCS aux planes (color_plane == 1) always use
 * 128 byte wide tiles.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
1996
1997 static unsigned int
1998 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1999 {
2000         return intel_tile_size(to_i915(fb->dev)) /
2001                 intel_tile_width_bytes(fb, color_plane);
2002 }
2003
2004 /* Return the tile dimensions in pixel units */
2005 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2006                             unsigned int *tile_width,
2007                             unsigned int *tile_height)
2008 {
2009         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2010         unsigned int cpp = fb->format->cpp[color_plane];
2011
2012         *tile_width = tile_width_bytes / cpp;
2013         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
2014 }
2015
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2024
2025 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2026 {
2027         unsigned int size = 0;
2028         int i;
2029
2030         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2031                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2032
2033         return size;
2034 }
2035
2036 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2037 {
2038         unsigned int size = 0;
2039         int i;
2040
2041         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2042                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2043
2044         return size;
2045 }
2046
2047 static void
2048 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2049                         const struct drm_framebuffer *fb,
2050                         unsigned int rotation)
2051 {
2052         view->type = I915_GGTT_VIEW_NORMAL;
2053         if (drm_rotation_90_or_270(rotation)) {
2054                 view->type = I915_GGTT_VIEW_ROTATED;
2055                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2056         }
2057 }
2058
/* Required GGTT alignment for cursor surfaces, in bytes. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2070
/* Required GGTT alignment for linear surfaces, in bytes. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2083
2084 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2085                                          int color_plane)
2086 {
2087         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2088
2089         /* AUX_DIST needs only 4K alignment */
2090         if (color_plane == 1)
2091                 return 4096;
2092
2093         switch (fb->modifier) {
2094         case DRM_FORMAT_MOD_LINEAR:
2095                 return intel_linear_alignment(dev_priv);
2096         case I915_FORMAT_MOD_X_TILED:
2097                 if (INTEL_GEN(dev_priv) >= 9)
2098                         return 256 * 1024;
2099                 return 0;
2100         case I915_FORMAT_MOD_Y_TILED_CCS:
2101         case I915_FORMAT_MOD_Yf_TILED_CCS:
2102         case I915_FORMAT_MOD_Y_TILED:
2103         case I915_FORMAT_MOD_Yf_TILED:
2104                 return 1 * 1024 * 1024;
2105         default:
2106                 MISSING_CASE(fb->modifier);
2107                 return 0;
2108         }
2109 }
2110
2111 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2112 {
2113         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2114         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2115
2116         return INTEL_GEN(dev_priv) < 4 ||
2117                 (plane->has_fbc &&
2118                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2119 }
2120
/*
 * Pin @fb's backing object into the GGTT for scanout via @view,
 * optionally attaching a fence register (recorded in *out_flags
 * as PLANE_HAS_FENCE). Returns the pinned vma with an extra
 * reference held for the caller, or an ERR_PTR on failure; the
 * caller releases it with intel_unpin_fb_vma(vma, *out_flags).
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	i915_gem_object_lock(obj);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 cannot scan out without a fence */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* extra reference for the caller; dropped in intel_unpin_fb_vma() */
	i915_vma_get(vma);
err:
	/* common exit path, also reached on success */
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	i915_gem_object_unlock(obj);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2215
/*
 * Release a scanout pin taken by intel_pin_and_fence_fb_obj():
 * drop the fence (if PLANE_HAS_FENCE is set in @flags), unpin the
 * vma from the display plane, and drop the caller's vma reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2226
2227 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2228                           unsigned int rotation)
2229 {
2230         if (drm_rotation_90_or_270(rotation))
2231                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2232         else
2233                 return fb->pitches[color_plane];
2234 }
2235
2236 /*
2237  * Convert the x/y offsets into a linear offset.
2238  * Only valid with 0/180 degree rotation, which is fine since linear
2239  * offset is only used with linear buffers on pre-hsw and tiled buffers
2240  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2241  */
2242 u32 intel_fb_xy_to_linear(int x, int y,
2243                           const struct intel_plane_state *state,
2244                           int color_plane)
2245 {
2246         const struct drm_framebuffer *fb = state->hw.fb;
2247         unsigned int cpp = fb->format->cpp[color_plane];
2248         unsigned int pitch = state->color_plane[color_plane].stride;
2249
2250         return y * pitch + x * cpp;
2251 }
2252
2253 /*
2254  * Add the x/y offsets derived from fb->offsets[] to the user
2255  * specified plane src x/y offsets. The resulting x/y offsets
2256  * specify the start of scanout from the beginning of the gtt mapping.
2257  */
2258 void intel_add_fb_offsets(int *x, int *y,
2259                           const struct intel_plane_state *state,
2260                           int color_plane)
2261
2262 {
2263         *x += state->color_plane[color_plane].x;
2264         *y += state->color_plane[color_plane].y;
2265 }
2266
/*
 * Move the difference between @old_offset and @new_offset (both
 * tile-size aligned, with new <= old) into the *x/*y coordinates,
 * so the surface can be addressed from the smaller base offset.
 * Returns @new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	/* distance between the offsets in whole tiles */
	tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2293
/*
 * A surface is linear iff its modifier is DRM_FORMAT_MOD_LINEAR.
 * NOTE(review): @color_plane is currently unused — presumably kept
 * so modifiers with per-plane linearity can be handled later.
 */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2298
/*
 * Adjust the x/y offsets so the surface can be scanned out from
 * @new_offset instead of @old_offset (new <= old, both in bytes
 * from the start of the fb plane). Tiled surfaces are adjusted in
 * whole tiles; linear surfaces in pitch rows plus pixels. Returns
 * @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile rows, swap dims */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* linear: fold the byte delta back into rows and pixels */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2337
2338 /*
2339  * Adjust the tile offset by moving the difference into
2340  * the x/y offsets.
2341  */
2342 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2343                                              const struct intel_plane_state *state,
2344                                              int color_plane,
2345                                              u32 old_offset, u32 new_offset)
2346 {
2347         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2348                                            state->hw.rotation,
2349                                            state->color_plane[color_plane].stride,
2350                                            old_offset, new_offset);
2351 }
2352
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/* Turn the power-of-two alignment into a bitmask (0 stays 0). */
	if (alignment)
		alignment--;

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* In the rotated view the pitch is in tile rows. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles plus the intra-tile remainder. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Push the sub-alignment remainder back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* Keep the below-alignment part of the offset in x/y. */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2417
2418 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2419                                               const struct intel_plane_state *state,
2420                                               int color_plane)
2421 {
2422         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2423         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2424         const struct drm_framebuffer *fb = state->hw.fb;
2425         unsigned int rotation = state->hw.rotation;
2426         int pitch = state->color_plane[color_plane].stride;
2427         u32 alignment;
2428
2429         if (intel_plane->id == PLANE_CURSOR)
2430                 alignment = intel_cursor_alignment(dev_priv);
2431         else
2432                 alignment = intel_surf_alignment(fb, color_plane);
2433
2434         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2435                                             pitch, rotation, alignment);
2436 }
2437
2438 /* Convert the fb->offset[] into x/y offsets */
2439 static int intel_fb_offset_to_xy(int *x, int *y,
2440                                  const struct drm_framebuffer *fb,
2441                                  int color_plane)
2442 {
2443         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2444         unsigned int height;
2445
2446         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2447             fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2448                 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2449                               fb->offsets[color_plane], color_plane);
2450                 return -EINVAL;
2451         }
2452
2453         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2454         height = ALIGN(height, intel_tile_height(fb, color_plane));
2455
2456         /* Catch potential overflows early */
2457         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2458                             fb->offsets[color_plane])) {
2459                 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2460                               fb->offsets[color_plane], fb->pitches[color_plane],
2461                               color_plane);
2462                 return -ERANGE;
2463         }
2464
2465         *x = 0;
2466         *y = 0;
2467
2468         intel_adjust_aligned_offset(x, y,
2469                                     fb, color_plane, DRM_MODE_ROTATE_0,
2470                                     fb->pitches[color_plane],
2471                                     fb->offsets[color_plane], 0);
2472
2473         return 0;
2474 }
2475
2476 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2477 {
2478         switch (fb_modifier) {
2479         case I915_FORMAT_MOD_X_TILED:
2480                 return I915_TILING_X;
2481         case I915_FORMAT_MOD_Y_TILED:
2482         case I915_FORMAT_MOD_Y_TILED_CCS:
2483                 return I915_TILING_Y;
2484         default:
2485                 return I915_TILING_NONE;
2486         }
2487 }
2488
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info ccs_formats[] = {
	/*
	 * Plane 0: 4 bpp main surface. Plane 1: the CCS, 1 byte per
	 * 8x16 main-surface pixels (hence hsub = 8, vsub = 16).
	 */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2513
2514 static const struct drm_format_info *
2515 lookup_format_info(const struct drm_format_info formats[],
2516                    int num_formats, u32 format)
2517 {
2518         int i;
2519
2520         for (i = 0; i < num_formats; i++) {
2521                 if (formats[i].format == format)
2522                         return &formats[i];
2523         }
2524
2525         return NULL;
2526 }
2527
2528 static const struct drm_format_info *
2529 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2530 {
2531         switch (cmd->modifier[0]) {
2532         case I915_FORMAT_MOD_Y_TILED_CCS:
2533         case I915_FORMAT_MOD_Yf_TILED_CCS:
2534                 return lookup_format_info(ccs_formats,
2535                                           ARRAY_SIZE(ccs_formats),
2536                                           cmd->pixel_format);
2537         default:
2538                 return NULL;
2539         }
2540 }
2541
2542 bool is_ccs_modifier(u64 modifier)
2543 {
2544         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2545                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2546 }
2547
2548 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2549                               u32 pixel_format, u64 modifier)
2550 {
2551         struct intel_crtc *crtc;
2552         struct intel_plane *plane;
2553
2554         /*
2555          * We assume the primary plane for pipe A has
2556          * the highest stride limits of them all.
2557          */
2558         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2559         if (!crtc)
2560                 return 0;
2561
2562         plane = to_intel_plane(crtc->base.primary);
2563
2564         return plane->max_stride(plane, pixel_format, modifier,
2565                                  DRM_MODE_ROTATE_0);
2566 }
2567
2568 static
2569 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2570                         u32 pixel_format, u64 modifier)
2571 {
2572         /*
2573          * Arbitrary limit for gen4+ chosen to match the
2574          * render engine max stride.
2575          *
2576          * The new CCS hash mode makes remapping impossible
2577          */
2578         if (!is_ccs_modifier(modifier)) {
2579                 if (INTEL_GEN(dev_priv) >= 7)
2580                         return 256*1024;
2581                 else if (INTEL_GEN(dev_priv) >= 4)
2582                         return 128*1024;
2583         }
2584
2585         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2586 }
2587
2588 static u32
2589 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2590 {
2591         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2592
2593         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2594                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2595                                                            fb->format->format,
2596                                                            fb->modifier);
2597
2598                 /*
2599                  * To make remapping with linear generally feasible
2600                  * we need the stride to be page aligned.
2601                  */
2602                 if (fb->pitches[color_plane] > max_stride)
2603                         return intel_tile_size(dev_priv);
2604                 else
2605                         return 64;
2606         } else {
2607                 return intel_tile_width_bytes(fb, color_plane);
2608         }
2609 }
2610
2611 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2612 {
2613         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2614         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2615         const struct drm_framebuffer *fb = plane_state->hw.fb;
2616         int i;
2617
2618         /* We don't want to deal with remapping with cursors */
2619         if (plane->id == PLANE_CURSOR)
2620                 return false;
2621
2622         /*
2623          * The display engine limits already match/exceed the
2624          * render engine limits, so not much point in remapping.
2625          * Would also need to deal with the fence POT alignment
2626          * and gen2 2KiB GTT tile size.
2627          */
2628         if (INTEL_GEN(dev_priv) < 4)
2629                 return false;
2630
2631         /*
2632          * The new CCS hash mode isn't compatible with remapping as
2633          * the virtual address of the pages affects the compressed data.
2634          */
2635         if (is_ccs_modifier(fb->modifier))
2636                 return false;
2637
2638         /* Linear needs a page aligned stride for remapping */
2639         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2640                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2641
2642                 for (i = 0; i < fb->format->num_planes; i++) {
2643                         if (fb->pitches[i] & alignment)
2644                                 return false;
2645                 }
2646         }
2647
2648         return true;
2649 }
2650
2651 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2652 {
2653         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2654         const struct drm_framebuffer *fb = plane_state->hw.fb;
2655         unsigned int rotation = plane_state->hw.rotation;
2656         u32 stride, max_stride;
2657
2658         /*
2659          * No remapping for invisible planes since we don't have
2660          * an actual source viewport to remap.
2661          */
2662         if (!plane_state->uapi.visible)
2663                 return false;
2664
2665         if (!intel_plane_can_remap(plane_state))
2666                 return false;
2667
2668         /*
2669          * FIXME: aux plane limits on gen9+ are
2670          * unclear in Bspec, for now no checking.
2671          */
2672         stride = intel_fb_pitch(fb, 0, rotation);
2673         max_stride = plane->max_stride(plane, fb->format->format,
2674                                        fb->modifier, rotation);
2675
2676         return stride > max_stride;
2677 }
2678
/*
 * Fill in the derived layout information for a framebuffer: the
 * per-plane x/y of the first pixel in the normal GTT view
 * (intel_fb->normal[]), the rotated-view geometry (rot_info and
 * intel_fb->rotated[]), and a validation that the framebuffer fits
 * inside the backing object. Returns 0 or a negative error code.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Turn the plane's byte offset into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			/* Tile dims scaled to main-surface pixel units. */
			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		/* offset is now measured in whole tiles */
		offset /= tile_size;

		if (!is_surface_linear(fb->modifier, i)) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			/* Plane geometry in tiles for the rotated view. */
			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* Linear: size in tiles, rounded up. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2840
/*
 * Build a rotated/remapped GGTT view for the plane and rewrite the
 * plane state (view, per-plane stride/x/y, uapi.src) so that the
 * source viewport is expressed relative to the new view. Only called
 * for non-CCS framebuffers (see the WARN_ON below).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are 16.16 fixed point */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* Plane 0 is never subsampled; others use the fb factors. */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		/* offset is now measured in whole tiles */
		offset /= tile_size;

		/* Remapped-view geometry for this plane, in tiles. */
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
2950
/*
 * Compute the GGTT view and the per-color-plane stride/offset/x/y for
 * the plane state. Uses the remapped view when the stride exceeds the
 * hardware limit, otherwise the normal (or rotated) view derived at
 * framebuffer creation time. Returns 0 or a negative error code from
 * the stride check.
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	/* Use the precomputed normal/rotated offsets from the fb. */
	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
2999
3000 static int i9xx_format_to_fourcc(int format)
3001 {
3002         switch (format) {
3003         case DISPPLANE_8BPP:
3004                 return DRM_FORMAT_C8;
3005         case DISPPLANE_BGRA555:
3006                 return DRM_FORMAT_ARGB1555;
3007         case DISPPLANE_BGRX555:
3008                 return DRM_FORMAT_XRGB1555;
3009         case DISPPLANE_BGRX565:
3010                 return DRM_FORMAT_RGB565;
3011         default:
3012         case DISPPLANE_BGRX888:
3013                 return DRM_FORMAT_XRGB8888;
3014         case DISPPLANE_RGBX888:
3015                 return DRM_FORMAT_XBGR8888;
3016         case DISPPLANE_BGRA888:
3017                 return DRM_FORMAT_ARGB8888;
3018         case DISPPLANE_RGBA888:
3019                 return DRM_FORMAT_ABGR8888;
3020         case DISPPLANE_BGRX101010:
3021                 return DRM_FORMAT_XRGB2101010;
3022         case DISPPLANE_RGBX101010:
3023                 return DRM_FORMAT_XBGR2101010;
3024         case DISPPLANE_BGRA101010:
3025                 return DRM_FORMAT_ARGB2101010;
3026         case DISPPLANE_RGBA101010:
3027                 return DRM_FORMAT_ABGR2101010;
3028         case DISPPLANE_RGBX161616:
3029                 return DRM_FORMAT_XBGR16161616F;
3030         }
3031 }
3032
3033 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3034 {
3035         switch (format) {
3036         case PLANE_CTL_FORMAT_RGB_565:
3037                 return DRM_FORMAT_RGB565;
3038         case PLANE_CTL_FORMAT_NV12:
3039                 return DRM_FORMAT_NV12;
3040         case PLANE_CTL_FORMAT_P010:
3041                 return DRM_FORMAT_P010;
3042         case PLANE_CTL_FORMAT_P012:
3043                 return DRM_FORMAT_P012;
3044         case PLANE_CTL_FORMAT_P016:
3045                 return DRM_FORMAT_P016;
3046         case PLANE_CTL_FORMAT_Y210:
3047                 return DRM_FORMAT_Y210;
3048         case PLANE_CTL_FORMAT_Y212:
3049                 return DRM_FORMAT_Y212;
3050         case PLANE_CTL_FORMAT_Y216:
3051                 return DRM_FORMAT_Y216;
3052         case PLANE_CTL_FORMAT_Y410:
3053                 return DRM_FORMAT_XVYU2101010;
3054         case PLANE_CTL_FORMAT_Y412:
3055                 return DRM_FORMAT_XVYU12_16161616;
3056         case PLANE_CTL_FORMAT_Y416:
3057                 return DRM_FORMAT_XVYU16161616;
3058         default:
3059         case PLANE_CTL_FORMAT_XRGB_8888:
3060                 if (rgb_order) {
3061                         if (alpha)
3062                                 return DRM_FORMAT_ABGR8888;
3063                         else
3064                                 return DRM_FORMAT_XBGR8888;
3065                 } else {
3066                         if (alpha)
3067                                 return DRM_FORMAT_ARGB8888;
3068                         else
3069                                 return DRM_FORMAT_XRGB8888;
3070                 }
3071         case PLANE_CTL_FORMAT_XRGB_2101010:
3072                 if (rgb_order) {
3073                         if (alpha)
3074                                 return DRM_FORMAT_ABGR2101010;
3075                         else
3076                                 return DRM_FORMAT_XBGR2101010;
3077                 } else {
3078                         if (alpha)
3079                                 return DRM_FORMAT_ARGB2101010;
3080                         else
3081                                 return DRM_FORMAT_XRGB2101010;
3082                 }
3083         case PLANE_CTL_FORMAT_XRGB_16161616F:
3084                 if (rgb_order) {
3085                         if (alpha)
3086                                 return DRM_FORMAT_ABGR16161616F;
3087                         else
3088                                 return DRM_FORMAT_XBGR16161616F;
3089                 } else {
3090                         if (alpha)
3091                                 return DRM_FORMAT_ARGB16161616F;
3092                         else
3093                                 return DRM_FORMAT_XRGB16161616F;
3094                 }
3095         }
3096 }
3097
/*
 * Try to adopt the firmware/BIOS-programmed framebuffer: wrap the
 * preallocated stolen-memory range described by @plane_config in a GEM
 * object and initialize plane_config->fb around it.
 *
 * Returns true on success, false when the BIOS FB can't be reused (zero
 * size, too large, unsupported modifier or tiling, allocation or fb init
 * failure).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* The BIOS base/size need not be page aligned; cover whole pages. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	/* Only modifiers we know how to adopt are accepted. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
				 fb->modifier);
		return false;
	}

	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (IS_ERR(obj))
		return false;

	/* Propagate the BIOS tiling mode and stride onto the GEM object. */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto out;
	}

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	ret = true;
out:
	/*
	 * Drop our local reference on both paths; on success the
	 * framebuffer presumably holds its own reference on obj —
	 * NOTE(review): verify against intel_framebuffer_init().
	 */
	i915_gem_object_put(obj);
	return ret;
}
3172
3173 static void
3174 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3175                         struct intel_plane_state *plane_state,
3176                         bool visible)
3177 {
3178         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3179
3180         plane_state->uapi.visible = visible;
3181
3182         if (visible)
3183                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3184         else
3185                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3186 }
3187
3188 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3189 {
3190         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3191         struct drm_plane *plane;
3192
3193         /*
3194          * Active_planes aliases if multiple "primary" or cursor planes
3195          * have been used on the same (or wrong) pipe. plane_mask uses
3196          * unique ids, hence we can use that to reconstruct active_planes.
3197          */
3198         crtc_state->active_planes = 0;
3199
3200         drm_for_each_plane_mask(plane, &dev_priv->drm,
3201                                 crtc_state->uapi.plane_mask)
3202                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3203 }
3204
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * updating the SW crtc/plane state (visibility, active_planes,
 * data_rate, min_cdclk) to match before touching the hardware.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	/* A disabled plane contributes no bandwidth/cdclk demand. */
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	intel_disable_plane(plane, crtc_state);
}
3227
3228 static struct intel_frontbuffer *
3229 to_intel_frontbuffer(struct drm_framebuffer *fb)
3230 {
3231         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3232 }
3233
/*
 * Bind the framebuffer the BIOS/GOP left programmed (described by
 * @plane_config) to @intel_crtc's primary plane, so the boot image
 * survives driver takeover.
 *
 * Order of attempts:
 *  1. reconstruct an FB from the preallocated stolen range
 *     (intel_alloc_initial_plane_obj());
 *  2. share the FB of another active CRTC scanning out of the same
 *     GGTT offset;
 *  3. give up and disable the primary plane so SW state matches HW.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* The reconstructed fb won't be used; free it. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Pin the fb so the hardware can keep scanning out of it. */
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* src coordinates are 16.16 fixed point; scan out the full fb 1:1. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	/* Mark the plane's fb bit busy for frontbuffer tracking. */
	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3339
3340 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3341                                int color_plane,
3342                                unsigned int rotation)
3343 {
3344         int cpp = fb->format->cpp[color_plane];
3345
3346         switch (fb->modifier) {
3347         case DRM_FORMAT_MOD_LINEAR:
3348         case I915_FORMAT_MOD_X_TILED:
3349                 /*
3350                  * Validated limit is 4k, but has 5k should
3351                  * work apart from the following features:
3352                  * - Ytile (already limited to 4k)
3353                  * - FP16 (already limited to 4k)
3354                  * - render compression (already limited to 4k)
3355                  * - KVMR sprite and cursor (don't care)
3356                  * - horizontal panning (TODO verify this)
3357                  * - pipe and plane scaling (TODO verify this)
3358                  */
3359                 if (cpp == 8)
3360                         return 4096;
3361                 else
3362                         return 5120;
3363         case I915_FORMAT_MOD_Y_TILED_CCS:
3364         case I915_FORMAT_MOD_Yf_TILED_CCS:
3365                 /* FIXME AUX plane? */
3366         case I915_FORMAT_MOD_Y_TILED:
3367         case I915_FORMAT_MOD_Yf_TILED:
3368                 if (cpp == 8)
3369                         return 2048;
3370                 else
3371                         return 4096;
3372         default:
3373                 MISSING_CASE(fb->modifier);
3374                 return 2048;
3375         }
3376 }
3377
3378 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3379                                int color_plane,
3380                                unsigned int rotation)
3381 {
3382         int cpp = fb->format->cpp[color_plane];
3383
3384         switch (fb->modifier) {
3385         case DRM_FORMAT_MOD_LINEAR:
3386         case I915_FORMAT_MOD_X_TILED:
3387                 if (cpp == 8)
3388                         return 4096;
3389                 else
3390                         return 5120;
3391         case I915_FORMAT_MOD_Y_TILED_CCS:
3392         case I915_FORMAT_MOD_Yf_TILED_CCS:
3393                 /* FIXME AUX plane? */
3394         case I915_FORMAT_MOD_Y_TILED:
3395         case I915_FORMAT_MOD_Yf_TILED:
3396                 if (cpp == 8)
3397                         return 2048;
3398                 else
3399                         return 5120;
3400         default:
3401                 MISSING_CASE(fb->modifier);
3402                 return 2048;
3403         }
3404 }
3405
/*
 * Max main-surface source width on gen11+ (icl). A single limit applies;
 * the unused parameters keep the signature parallel to the skl/glk
 * variants.
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3412
/* Max plane source height on pre-gen11 (skl/glk) platforms. */
static int skl_max_plane_height(void)
{
	return 4096;
}
3417
/* Max plane source height on gen11+ (icl) platforms. */
static int icl_max_plane_height(void)
{
	return 4320;
}
3422
/*
 * Try to make the CCS AUX surface (color plane 1) x/y coincide with the
 * given main surface x/y. The AUX offset is walked downwards one
 * alignment step at a time, trading offset for larger x/y, until the
 * coordinates match or no further adjustment is possible.
 *
 * Returns true (committing the new AUX offset/x/y) on success; returns
 * false, leaving plane_state untouched, when no matching placement
 * exists.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Adjust in AUX-plane units, then restore the subsample remainders. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
3460
/*
 * Compute the final main (Y/RGB) surface offset and x/y for a skl+
 * plane, honouring the per-platform max source size, the X-tile
 * x+width-vs-stride restriction, and CCS AUX placement constraints.
 * Results land in plane_state->color_plane[0] and the uapi src rect is
 * re-anchored to the final x/y.
 *
 * Returns 0 on success, -EINVAL if no valid placement exists.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset down, converting offset into x, until it fits. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
3557
/*
 * Compute the offset/x/y of the UV (chroma) surface of a semi-planar
 * YUV fb, i.e. color plane 1.
 *
 * src coordinates are 16.16 fixed point; the extra shift (>> 17 instead
 * of >> 16) converts luma coordinates to the 2x subsampled chroma plane.
 *
 * Returns 0 on success, -EINVAL if the chroma source size exceeds the
 * plane limits.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3586
/*
 * Compute the offset/x/y of the CCS AUX surface (color plane 1). The
 * AUX surface is subsampled by hsub x vsub relative to the main
 * surface, so main-surface src coordinates are scaled down before
 * alignment and the remainders added back afterwards.
 *
 * Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	/* src coordinates are 16.16 fixed point */
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int x = src_x / hsub;
	int y = src_y / vsub;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
	plane_state->color_plane[1].y = y * vsub + src_y % vsub;

	return 0;
}
3607
3608 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3609 {
3610         const struct drm_framebuffer *fb = plane_state->hw.fb;
3611         int ret;
3612
3613         ret = intel_plane_compute_gtt(plane_state);
3614         if (ret)
3615                 return ret;
3616
3617         if (!plane_state->uapi.visible)
3618                 return 0;
3619
3620         /*
3621          * Handle the AUX surface first since
3622          * the main surface setup depends on it.
3623          */
3624         if (drm_format_info_is_yuv_semiplanar(fb->format)) {
3625                 ret = skl_check_nv12_aux_surface(plane_state);
3626                 if (ret)
3627                         return ret;
3628         } else if (is_ccs_modifier(fb->modifier)) {
3629                 ret = skl_check_ccs_aux_surface(plane_state);
3630                 if (ret)
3631                         return ret;
3632         } else {
3633                 plane_state->color_plane[1].offset = ~0xfff;
3634                 plane_state->color_plane[1].x = 0;
3635                 plane_state->color_plane[1].y = 0;
3636         }
3637
3638         ret = skl_check_main_surface(plane_state);
3639         if (ret)
3640                 return ret;
3641
3642         return 0;
3643 }
3644
3645 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
3646                              const struct intel_plane_state *plane_state,
3647                              unsigned int *num, unsigned int *den)
3648 {
3649         const struct drm_framebuffer *fb = plane_state->hw.fb;
3650         unsigned int cpp = fb->format->cpp[0];
3651
3652         /*
3653          * g4x bspec says 64bpp pixel rate can't exceed 80%
3654          * of cdclk when the sprite plane is enabled on the
3655          * same pipe. ilk/snb bspec says 64bpp pixel rate is
3656          * never allowed to exceed 80% of cdclk. Let's just go
3657          * with the ilk/snb limit always.
3658          */
3659         if (cpp == 8) {
3660                 *num = 10;
3661                 *den = 8;
3662         } else {
3663                 *num = 1;
3664                 *den = 1;
3665         }
3666 }
3667
3668 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
3669                                 const struct intel_plane_state *plane_state)
3670 {
3671         unsigned int pixel_rate;
3672         unsigned int num, den;
3673
3674         /*
3675          * Note that crtc_state->pixel_rate accounts for both
3676          * horizontal and vertical panel fitter downscaling factors.
3677          * Pre-HSW bspec tells us to only consider the horizontal
3678          * downscaling factor here. We ignore that and just consider
3679          * both for simplicity.
3680          */
3681         pixel_rate = crtc_state->pixel_rate;
3682
3683         i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
3684
3685         /* two pixels per clock with double wide pipe */
3686         if (crtc_state->double_wide)
3687                 den *= 2;
3688
3689         return DIV_ROUND_UP(pixel_rate * num, den);
3690 }
3691
3692 unsigned int
3693 i9xx_plane_max_stride(struct intel_plane *plane,
3694                       u32 pixel_format, u64 modifier,
3695                       unsigned int rotation)
3696 {
3697         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3698
3699         if (!HAS_GMCH(dev_priv)) {
3700                 return 32*1024;
3701         } else if (INTEL_GEN(dev_priv) >= 4) {
3702                 if (modifier == I915_FORMAT_MOD_X_TILED)
3703                         return 16*1024;
3704                 else
3705                         return 32*1024;
3706         } else if (INTEL_GEN(dev_priv) >= 3) {
3707                 if (modifier == I915_FORMAT_MOD_X_TILED)
3708                         return 8*1024;
3709                 else
3710                         return 16*1024;
3711         } else {
3712                 if (plane->i9xx_plane == PLANE_C)
3713                         return 4*1024;
3714                 else
3715                         return 8*1024;
3716         }
3717 }
3718
3719 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3720 {
3721         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3722         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3723         u32 dspcntr = 0;
3724
3725         if (crtc_state->gamma_enable)
3726                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3727
3728         if (crtc_state->csc_enable)
3729                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3730
3731         if (INTEL_GEN(dev_priv) < 5)
3732                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3733
3734         return dspcntr;
3735 }
3736
/*
 * Build the per-plane DSPCNTR value for a pre-skl primary plane: enable
 * bit, pixel format, tiling, and rotation/reflection bits. Pipe-level
 * bits are added separately by i9xx_plane_ctl_crtc().
 *
 * Returns 0 (with MISSING_CASE logged) for an unexpected pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Map the fb fourcc to the DISPPLANE pixel format field. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
3809
/*
 * Validate and finalize the surface layout for a pre-SKL primary plane:
 * pins the fb into the GTT, computes the (x, y) surface coordinates and
 * the aligned surface offset, and stores them in color_plane[0].
 *
 * Returns 0 on success or a negative error code.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x, src_y, src_w;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* An invisible plane needs no surface coordinates */
	if (!plane_state->uapi.visible)
		return 0;

	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	/* Undocumented hardware limit on i965/g4x/vlv/chv */
	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
		return -EINVAL;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* Pre-gen4 hardware has no surface offset register */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->hw.rotation;
		/*
		 * NOTE: these deliberately shadow the outer src_w and
		 * re-read the rectangle after the translate above.
		 */
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Point at the last pixel for 180 rotation / X mirror */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3869
3870 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
3871 {
3872         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3873         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3874
3875         if (IS_CHERRYVIEW(dev_priv))
3876                 return i9xx_plane == PLANE_B;
3877         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3878                 return false;
3879         else if (IS_GEN(dev_priv, 4))
3880                 return i9xx_plane == PLANE_C;
3881         else
3882                 return i9xx_plane == PLANE_B ||
3883                         i9xx_plane == PLANE_C;
3884 }
3885
/*
 * Atomic check for pre-SKL primary planes: validates rotation, the
 * clipped plane rectangle (no scaling supported), the surface layout
 * and the src coordinates, then precomputes the DSPCNTR value.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* No scaling; windowing only where the hardware supports it */
	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* Nothing further to compute for an invisible plane */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
3921
/*
 * Program and arm a pre-SKL primary plane from precomputed state.
 * All register writes happen under the uncore lock using the _FW
 * accessors so the update is as atomic as the hardware allows.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine the precomputed plane bits with the crtc-dependent bits */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 has no separate surface offset; use the linear offset */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own window + const alpha */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3992
/*
 * Disable a pre-SKL primary plane, while keeping the crtc-dependent
 * DSPCNTR bits programmed (see comment below).
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Disarm: control first, then the surface/address register */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4023
/*
 * Read the current hardware state of a pre-SKL primary plane.
 * Returns true if the plane is enabled and stores the pipe it is
 * currently assigned to in *pipe. Returns false (without touching
 * *pipe) if the pipe power domain is not enabled.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Pre-ilk planes carry a pipe select field in DSPCNTR */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4058
4059 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4060 {
4061         struct drm_device *dev = intel_crtc->base.dev;
4062         struct drm_i915_private *dev_priv = to_i915(dev);
4063
4064         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4065         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4066         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4067 }
4068
4069 /*
4070  * This function detaches (aka. unbinds) unused scalers in hardware
4071  */
4072 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4073 {
4074         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4075         const struct intel_crtc_scaler_state *scaler_state =
4076                 &crtc_state->scaler_state;
4077         int i;
4078
4079         /* loop through and disable scalers that aren't in use */
4080         for (i = 0; i < intel_crtc->num_scalers; i++) {
4081                 if (!scaler_state->scalers[i].in_use)
4082                         skl_detach_scaler(intel_crtc, i);
4083         }
4084 }
4085
4086 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
4087                                           int color_plane, unsigned int rotation)
4088 {
4089         /*
4090          * The stride is either expressed as a multiple of 64 bytes chunks for
4091          * linear buffers or in number of tiles for tiled buffers.
4092          */
4093         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
4094                 return 64;
4095         else if (drm_rotation_90_or_270(rotation))
4096                 return intel_tile_height(fb, color_plane);
4097         else
4098                 return intel_tile_width_bytes(fb, color_plane);
4099 }
4100
4101 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4102                      int color_plane)
4103 {
4104         const struct drm_framebuffer *fb = plane_state->hw.fb;
4105         unsigned int rotation = plane_state->hw.rotation;
4106         u32 stride = plane_state->color_plane[color_plane].stride;
4107
4108         if (color_plane >= fb->format->num_planes)
4109                 return 0;
4110
4111         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4112 }
4113
/*
 * Translate a DRM fourcc into the PLANE_CTL format field. The
 * hardware only distinguishes component order for RGB formats, so
 * X/A variants share an encoding (alpha is handled separately).
 * Returns 0 (and logs) for formats the hardware cannot scan out.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	/* packed YUV 4:2:2 */
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	/* two-plane YUV */
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	/* packed YUV 4:2:2 / 4:4:4, >8 bpc */
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4173
4174 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4175 {
4176         if (!plane_state->hw.fb->format->has_alpha)
4177                 return PLANE_CTL_ALPHA_DISABLE;
4178
4179         switch (plane_state->hw.pixel_blend_mode) {
4180         case DRM_MODE_BLEND_PIXEL_NONE:
4181                 return PLANE_CTL_ALPHA_DISABLE;
4182         case DRM_MODE_BLEND_PREMULTI:
4183                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4184         case DRM_MODE_BLEND_COVERAGE:
4185                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4186         default:
4187                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4188                 return PLANE_CTL_ALPHA_DISABLE;
4189         }
4190 }
4191
4192 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4193 {
4194         if (!plane_state->hw.fb->format->has_alpha)
4195                 return PLANE_COLOR_ALPHA_DISABLE;
4196
4197         switch (plane_state->hw.pixel_blend_mode) {
4198         case DRM_MODE_BLEND_PIXEL_NONE:
4199                 return PLANE_COLOR_ALPHA_DISABLE;
4200         case DRM_MODE_BLEND_PREMULTI:
4201                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4202         case DRM_MODE_BLEND_COVERAGE:
4203                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4204         default:
4205                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4206                 return PLANE_COLOR_ALPHA_DISABLE;
4207         }
4208 }
4209
4210 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4211 {
4212         switch (fb_modifier) {
4213         case DRM_FORMAT_MOD_LINEAR:
4214                 break;
4215         case I915_FORMAT_MOD_X_TILED:
4216                 return PLANE_CTL_TILED_X;
4217         case I915_FORMAT_MOD_Y_TILED:
4218                 return PLANE_CTL_TILED_Y;
4219         case I915_FORMAT_MOD_Y_TILED_CCS:
4220                 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4221         case I915_FORMAT_MOD_Yf_TILED:
4222                 return PLANE_CTL_TILED_YF;
4223         case I915_FORMAT_MOD_Yf_TILED_CCS:
4224                 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4225         default:
4226                 MISSING_CASE(fb_modifier);
4227         }
4228
4229         return 0;
4230 }
4231
4232 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4233 {
4234         switch (rotate) {
4235         case DRM_MODE_ROTATE_0:
4236                 break;
4237         /*
4238          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4239          * while i915 HW rotation is clockwise, thats why this swapping.
4240          */
4241         case DRM_MODE_ROTATE_90:
4242                 return PLANE_CTL_ROTATE_270;
4243         case DRM_MODE_ROTATE_180:
4244                 return PLANE_CTL_ROTATE_180;
4245         case DRM_MODE_ROTATE_270:
4246                 return PLANE_CTL_ROTATE_90;
4247         default:
4248                 MISSING_CASE(rotate);
4249         }
4250
4251         return 0;
4252 }
4253
4254 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4255 {
4256         switch (reflect) {
4257         case 0:
4258                 break;
4259         case DRM_MODE_REFLECT_X:
4260                 return PLANE_CTL_FLIP_HORIZONTAL;
4261         case DRM_MODE_REFLECT_Y:
4262         default:
4263                 MISSING_CASE(reflect);
4264         }
4265
4266         return 0;
4267 }
4268
4269 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4270 {
4271         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4272         u32 plane_ctl = 0;
4273
4274         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4275                 return plane_ctl;
4276
4277         if (crtc_state->gamma_enable)
4278                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4279
4280         if (crtc_state->csc_enable)
4281                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4282
4283         return plane_ctl;
4284 }
4285
/*
 * Compute the plane-state-dependent part of PLANE_CTL: enable bit,
 * format, tiling, rotation/flip, alpha/YUV handling (pre-GLK only,
 * GLK+ moved those to PLANE_COLOR_CTL) and color keying.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* On GLK/CNL+ these live in PLANE_COLOR_CTL instead */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip is only available on CNL+ */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4324
4325 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4326 {
4327         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4328         u32 plane_color_ctl = 0;
4329
4330         if (INTEL_GEN(dev_priv) >= 11)
4331                 return plane_color_ctl;
4332
4333         if (crtc_state->gamma_enable)
4334                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4335
4336         if (crtc_state->csc_enable)
4337                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4338
4339         return plane_color_ctl;
4340 }
4341
/*
 * Compute the plane-state-dependent part of PLANE_COLOR_CTL (GLK+):
 * plane gamma disable, alpha mode and YUV->RGB CSC selection.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	/* Non-HDR planes use the fixed-function CSC selected here */
	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR planes use the programmable input CSC instead */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4368
/*
 * Re-take HW state ownership after a reset/resume and replay the
 * previously duplicated atomic state (if any). Returns 0 on success
 * or the commit error; -EDEADLK is unexpected here and warned about
 * since the caller holds all modeset locks.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	/* Nothing to replay */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
4407
4408 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4409 {
4410         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4411                 intel_has_gpu_reset(&dev_priv->gt));
4412 }
4413
/*
 * Quiesce the display before a GPU reset that will clobber it:
 * take all modeset locks, duplicate the current atomic state for
 * later restore, and disable all crtcs.
 *
 * NOTE: on success this returns with mode_config.mutex and the
 * acquire ctx locks still held; intel_finish_reset() drops them.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry lock acquisition until it no longer deadlocks */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_finish_reset() */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4470
/*
 * Counterpart of intel_prepare_reset(): restore the display state
 * stashed before the GPU reset, then drop the modeset locks taken
 * there and clear the I915_RESET_MODESET flag.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts before restoring state */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* These were taken (and left held) by intel_prepare_reset() */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4521
4522 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4523 {
4524         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4525         enum pipe pipe = crtc->pipe;
4526         u32 tmp;
4527
4528         tmp = I915_READ(PIPE_CHICKEN(pipe));
4529
4530         /*
4531          * Display WA #1153: icl
4532          * enable hardware to bypass the alpha math
4533          * and rounding for per-pixel values 00 and 0xff
4534          */
4535         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4536         /*
4537          * Display WA # 1605353570: icl
4538          * Set the pixel rounding bit to 1 for allowing
4539          * passthrough of Frame buffer pixels unmodified
4540          * across pipe
4541          */
4542         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4543         I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4544 }
4545
/*
 * Enable Transcoder Port Sync on a slave transcoder, pointing it at
 * its master. No-op for master/non-synced crtcs (those have
 * master_transcoder == INVALID_TRANSCODER).
 */
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 trans_ddi_func_ctl2_val;
	u8 master_select;

	/*
	 * Configure the master select and enable Transcoder Port Sync for
	 * Slave CRTCs transcoder.
	 */
	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
		return;

	/* Master select encoding: 0 = EDP, otherwise transcoder + 1 */
	if (crtc_state->master_transcoder == TRANSCODER_EDP)
		master_select = 0;
	else
		master_select = crtc_state->master_transcoder + 1;

	/* Set the master select bits for Transcoder Port Sync */
	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
	/* Enable Transcoder Port Sync */
	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;

	I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
		   trans_ddi_func_ctl2_val);
}
4575
4576 static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
4577 {
4578         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4579         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4580         i915_reg_t reg;
4581         u32 trans_ddi_func_ctl2_val;
4582
4583         if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
4584                 return;
4585
4586         DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
4587                       transcoder_name(old_crtc_state->cpu_transcoder));
4588
4589         reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
4590         trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
4591                                     PORT_SYNC_MODE_MASTER_SELECT_MASK);
4592         I915_WRITE(reg, trans_ddi_func_ctl2_val);
4593 }
4594
/*
 * Switch both ends of the FDI link (CPU TX and PCH RX) from a
 * training pattern to the normal pixel-data pattern, with enhanced
 * framing, after link training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* IVB uses a different train-pattern field layout in FDI_TX_CTL */
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCH uses its own train-pattern field layout in FDI_RX_CTL */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4635
4636 /* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the FDI link for ILK/Ibexpeak: run training pattern 1 until the
 * RX reports bit lock, then pattern 2 until it reports symbol lock.
 * Lock status is polled via the sticky FDI_RX_IIR bits, which are
 * cleared by writing them back. Failures are logged but not fatal.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll up to 5 times for bit lock on training pattern 1 */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* write back to clear the sticky lock bit */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll up to 5 times for symbol lock on training pattern 2 */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			/* write back to clear the sticky lock bit */
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4729
/*
 * Voltage-swing / pre-emphasis combinations tried in order by the
 * SNB and IVB FDI training loops below (OR'd into FDI_TX_CTL after
 * clearing FDI_LINK_TRAIN_VOL_EMP_MASK).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4736
4737 /* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link for SNB/Cougarpoint. Like the ILK variant, but
 * each training stage sweeps the four vswing/pre-emphasis levels from
 * snb_b_fdi_train_param[], retrying the lock poll up to 5 times per
 * level. CPT PCH uses its own pattern field in FDI_RX_CTL.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Sweep vswing/pre-emphasis levels until bit lock is reported */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write back to clear the sticky lock bit */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Sweep vswing/pre-emphasis levels until symbol lock is reported */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				/* write back to clear the sticky lock bit */
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4869
4870 /* Manual link training for Ivy Bridge A0 parts */
4871 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4872                                       const struct intel_crtc_state *crtc_state)
4873 {
4874         struct drm_device *dev = crtc->base.dev;
4875         struct drm_i915_private *dev_priv = to_i915(dev);
4876         enum pipe pipe = crtc->pipe;
4877         i915_reg_t reg;
4878         u32 temp, i, j;
4879
4880         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4881            for train result */
4882         reg = FDI_RX_IMR(pipe);
4883         temp = I915_READ(reg);
4884         temp &= ~FDI_RX_SYMBOL_LOCK;
4885         temp &= ~FDI_RX_BIT_LOCK;
4886         I915_WRITE(reg, temp);
4887
4888         POSTING_READ(reg);
4889         udelay(150);
4890
4891         DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4892                       I915_READ(FDI_RX_IIR(pipe)));
4893
4894         /* Try each vswing and preemphasis setting twice before moving on */
4895         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4896                 /* disable first in case we need to retry */
4897                 reg = FDI_TX_CTL(pipe);
4898                 temp = I915_READ(reg);
4899                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4900                 temp &= ~FDI_TX_ENABLE;
4901                 I915_WRITE(reg, temp);
4902
4903                 reg = FDI_RX_CTL(pipe);
4904                 temp = I915_READ(reg);
4905                 temp &= ~FDI_LINK_TRAIN_AUTO;
4906                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4907                 temp &= ~FDI_RX_ENABLE;
4908                 I915_WRITE(reg, temp);
4909
4910                 /* enable CPU FDI TX and PCH FDI RX */
4911                 reg = FDI_TX_CTL(pipe);
4912                 temp = I915_READ(reg);
4913                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
4914                 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4915                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4916                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4917                 temp |= snb_b_fdi_train_param[j/2];
4918                 temp |= FDI_COMPOSITE_SYNC;
4919                 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4920
4921                 I915_WRITE(FDI_RX_MISC(pipe),
4922                            FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4923
4924                 reg = FDI_RX_CTL(pipe);
4925                 temp = I915_READ(reg);
4926                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4927                 temp |= FDI_COMPOSITE_SYNC;
4928                 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4929
4930                 POSTING_READ(reg);
4931                 udelay(1); /* should be 0.5us */
4932
4933                 for (i = 0; i < 4; i++) {
4934                         reg = FDI_RX_IIR(pipe);
4935                         temp = I915_READ(reg);
4936                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4937
4938                         if (temp & FDI_RX_BIT_LOCK ||
4939                             (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4940                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4941                                 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4942                                               i);
4943                                 break;
4944                         }
4945                         udelay(1); /* should be 0.5us */
4946                 }
4947                 if (i == 4) {
4948                         DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4949                         continue;
4950                 }
4951
4952                 /* Train 2 */
4953                 reg = FDI_TX_CTL(pipe);
4954                 temp = I915_READ(reg);
4955                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4956                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4957                 I915_WRITE(reg, temp);
4958
4959                 reg = FDI_RX_CTL(pipe);
4960                 temp = I915_READ(reg);
4961                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4962                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4963                 I915_WRITE(reg, temp);
4964
4965                 POSTING_READ(reg);
4966                 udelay(2); /* should be 1.5us */
4967
4968                 for (i = 0; i < 4; i++) {
4969                         reg = FDI_RX_IIR(pipe);
4970                         temp = I915_READ(reg);
4971                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4972
4973                         if (temp & FDI_RX_SYMBOL_LOCK ||
4974                             (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4975                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4976                                 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4977                                               i);
4978                                 goto train_done;
4979                         }
4980                         udelay(2); /* should be 1.5us */
4981                 }
4982                 if (i == 4)
4983                         DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4984         }
4985
4986 train_done:
4987         DRM_DEBUG_KMS("FDI train done.\n");
4988 }
4989
/*
 * Enable the FDI PLLs for this pipe: PCH FDI RX PLL first (with lane
 * count and BPC copied from PIPECONF), then switch the RX clock from
 * Rawclk to PCDclk, and finally ensure the CPU FDI TX PLL is on. Each
 * step is followed by a posting read and the required warmup delay.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* mirror the pipe's BPC into bits 18:16 of FDI_RX_CTL */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
5026
/*
 * Disable the FDI PLLs for this pipe, reversing ironlake_fdi_pll_enable:
 * switch the RX clock back to Rawclk, turn off the CPU TX PLL, then the
 * PCH RX PLL, waiting for the clocks to settle after each PLL disable.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
5056
/*
 * Disable the FDI link on this pipe: turn off CPU TX and PCH RX,
 * apply the IBX clock-pointer workaround, then leave both sides parked
 * on training pattern 1 with the RX BPC kept in sync with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* mirror the pipe's BPC into bits 18:16 of FDI_RX_CTL */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
5109
5110 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5111 {
5112         struct drm_crtc *crtc;
5113         bool cleanup_done;
5114
5115         drm_for_each_crtc(crtc, &dev_priv->drm) {
5116                 struct drm_crtc_commit *commit;
5117                 spin_lock(&crtc->commit_lock);
5118                 commit = list_first_entry_or_null(&crtc->commit_list,
5119                                                   struct drm_crtc_commit, commit_entry);
5120                 cleanup_done = commit ?
5121                         try_wait_for_completion(&commit->cleanup_done) : true;
5122                 spin_unlock(&crtc->commit_lock);
5123
5124                 if (cleanup_done)
5125                         continue;
5126
5127                 drm_crtc_wait_one_vblank(crtc);
5128
5129                 return true;
5130         }
5131
5132         return false;
5133 }
5134
/*
 * Disable the LPT iCLKIP clock: gate the pixel clock, then set the
 * SSC control disable bit over sideband (SBI), serialized by sb_lock.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5149
/*
 * Program the LPT iCLKIP clock to the crtc's pixel clock frequency.
 *
 * Disables iCLKIP first, then computes integer divisor (divsel),
 * phase increment (phaseinc) and auxiliary divider (auxdiv) such that
 * iclk_virtual_root_freq / ((divsel + 2) * 64 + phaseinc) << auxdiv
 * approximates the target clock, programs them over sideband, and
 * re-enables the modulator. lpt_get_iclkip() is the inverse.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock in in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5228
/*
 * Read back the currently-programmed iCLKIP frequency in KHz, inverting
 * the calculation done by lpt_program_iclkip(). Returns 0 if the pixel
 * clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	/* Pixel clock gated -> iCLKIP not in use */
	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Inverse of the divisor decomposition in lpt_program_iclkip() */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5265
/*
 * Copy the CPU transcoder's H/V timing registers to the corresponding
 * PCH transcoder registers so both sides run identical timings.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
						enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
5289
/*
 * Set or clear the CPT FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * No-op if already in the requested state; warns if either FDI B or C
 * RX is still enabled, since the bit must not change while they run.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
5309
5310 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5311 {
5312         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5313         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5314
5315         switch (crtc->pipe) {
5316         case PIPE_A:
5317                 break;
5318         case PIPE_B:
5319                 if (crtc_state->fdi_lanes > 2)
5320                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5321                 else
5322                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5323
5324                 break;
5325         case PIPE_C:
5326                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5327
5328                 break;
5329         default:
5330                 BUG();
5331         }
5332 }
5333
5334 /*
5335  * Finds the encoder associated with the given CRTC. This can only be
5336  * used when we know that the CRTC isn't feeding multiple encoders!
5337  */
5338 static struct intel_encoder *
5339 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5340                            const struct intel_crtc_state *crtc_state)
5341 {
5342         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5343         const struct drm_connector_state *connector_state;
5344         const struct drm_connector *connector;
5345         struct intel_encoder *encoder = NULL;
5346         int num_encoders = 0;
5347         int i;
5348
5349         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5350                 if (connector_state->crtc != &crtc->base)
5351                         continue;
5352
5353                 encoder = to_intel_encoder(connector_state->best_encoder);
5354                 num_encoders++;
5355         }
5356
5357         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5358              num_encoders, pipe_name(crtc->pipe));
5359
5360         return encoder;
5361 }
5362
5363 /*
5364  * Enable PCH resources required for PCH ports:
5365  *   - PCH PLLs
5366  *   - FDI training & RX/TX
5367  *   - update transcoder timings
5368  *   - DP transcoding bits
5369  *   - transcoder
5370  */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		/* route DPLL A or B to this pipe's PCH transcoder */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		/* BPC field sits at bits 7:5 of PIPECONF */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		/* propagate the mode's sync polarities */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* only PCH DP ports B..D can be selected here */
		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(crtc_state);
}
5454
/*
 * Enable the LPT PCH transcoder for the given crtc: program the iCLKIP
 * clock, copy the CPU transcoder timings over, then enable the transcoder.
 * LPT has only a single PCH transcoder, tracked as PIPE_A throughout.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* The single LPT PCH transcoder must be off before we touch it. */
	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5471
/*
 * Verify a CPT modeset actually took: poll the pipe scanline counter
 * (PIPEDSL) and complain if it never moves, i.e. the pipe is stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	/* Retry the 5ms wait once before declaring the pipe stuck. */
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
5485
5486 /*
5487  * The hardware phase 0.0 refers to the center of the pixel.
5488  * We want to start from the top/left edge which is phase
5489  * -0.5. That matches how the hardware calculates the scaling
5490  * factors (from top-left of the first pixel to bottom-right
5491  * of the last pixel, as opposed to the pixel centers).
5492  *
5493  * For 4:2:0 subsampled chroma planes we obviously have to
5494  * adjust that so that the chroma sample position lands in
5495  * the right spot.
5496  *
5497  * Note that for packed YCbCr 4:2:2 formats there is no way to
5498  * control chroma siting. The hardware simply replicates the
5499  * chroma samples for both of the luma samples, and thus we don't
5500  * actually get the expected MPEG2 chroma siting convention :(
5501  * The same behaviour is observed on pre-SKL platforms as well.
5502  *
5503  * Theory behind the formula (note that we ignore sub-pixel
5504  * source coordinates):
5505  * s = source sample position
5506  * d = destination sample position
5507  *
5508  * Downscaling 4:1:
5509  * -0.5
5510  * | 0.0
5511  * | |     1.5 (initial phase)
5512  * | |     |
5513  * v v     v
5514  * | s | s | s | s |
5515  * |       d       |
5516  *
5517  * Upscaling 1:4:
5518  * -0.5
5519  * | -0.375 (initial phase)
5520  * | |     0.0
5521  * | |     |
5522  * v v     v
5523  * |       s       |
5524  * | d | d | d | d |
5525  */
5526 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5527 {
5528         int phase = -0x8000;
5529         u16 trip = 0;
5530
5531         if (chroma_cosited)
5532                 phase += (sub - 1) * 0x8000 / sub;
5533
5534         phase += scale / (2 * sub);
5535
5536         /*
5537          * Hardware initial phase limited to [-0.5:1.5].
5538          * Since the max hardware scale factor is 3.0, we
5539          * should never actually excdeed 1.0 here.
5540          */
5541         WARN_ON(phase < -0x8000 || phase > 0x18000);
5542
5543         if (phase < 0)
5544                 phase = 0x10000 + phase;
5545         else
5546                 trip = PS_PHASE_TRIP;
5547
5548         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5549 }
5550
5551 #define SKL_MIN_SRC_W 8
5552 #define SKL_MAX_SRC_W 4096
5553 #define SKL_MIN_SRC_H 8
5554 #define SKL_MAX_SRC_H 4096
5555 #define SKL_MIN_DST_W 8
5556 #define SKL_MAX_DST_W 4096
5557 #define SKL_MIN_DST_H 8
5558 #define SKL_MAX_DST_H 4096
5559 #define ICL_MAX_SRC_W 5120
5560 #define ICL_MAX_SRC_H 4096
5561 #define ICL_MAX_DST_W 5120
5562 #define ICL_MAX_DST_H 4096
5563 #define SKL_MIN_YUV_420_SRC_W 16
5564 #define SKL_MIN_YUV_420_SRC_H 16
5565
/*
 * Stage a scaler allocation (or release) for one scaler user (the crtc
 * itself or a plane) in @crtc_state. Validates source/destination sizes
 * against per-platform limits, then marks/clears the user's bit in
 * scaler_state->scaler_users. Actual register programming happens later
 * during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL when the requested scaling cannot be
 * supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Semi-planar YUV has a larger minimum source size. */
	if (format && drm_format_info_is_yuv_semiplanar(format) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks: ICL+ raises the max width limits over SKL */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
5654
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc state to stage the scaler update for
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
	bool need_scaler = false;

	/* YCbCr 4:2:0 output always requires the pipe scaler. */
	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		need_scaler = true;

	/* Detach the scaler when the crtc is not active. */
	return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
				 &state->scaler_state.scaler_id,
				 state->pipe_src_w, state->pipe_src_h,
				 adjusted_mode->crtc_hdisplay,
				 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
}
5678
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state the plane belongs to
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	/* Release the scaler when the plane has no fb or is invisible. */
	bool force_detach = !fb || !plane_state->uapi.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && drm_format_info_is_yuv_semiplanar(fb->format))
		need_scaler = true;

	/* src coordinates are .16 fixed point, dst are integer pixels. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->uapi.src) >> 16,
				drm_rect_height(&plane_state->uapi.src) >> 16,
				drm_rect_width(&plane_state->uapi.dst),
				drm_rect_height(&plane_state->uapi.dst),
				fb ? fb->format : NULL, need_scaler);

	/* Nothing more to validate if no scaler was assigned. */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		/* FP16 formats are only scalable on gen11+ */
		if (INTEL_GEN(dev_priv) >= 11)
			break;
		/* fall through */
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
5766
5767 static void skylake_scaler_disable(struct intel_crtc *crtc)
5768 {
5769         int i;
5770
5771         for (i = 0; i < crtc->num_scalers; i++)
5772                 skl_detach_scaler(crtc, i);
5773 }
5774
/*
 * Program the SKL+ panel fitter (implemented via a pipe scaler): compute
 * the UV/RGB scaling phases from the pipe source vs. pfit window sizes,
 * then write the scaler control, phase, position and size registers.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* pfit needs a scaler staged by the atomic check. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in bits 31:16, height in 15:0. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* .16 fixed-point downscale factors (src/dst). */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		/*
		 * NOTE(review): mix of I915_WRITE and I915_WRITE_FW (the
		 * latter skips forcewake bookkeeping) looks inconsistent —
		 * confirm this is intentional.
		 */
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
5811
/*
 * Program the ILK-style panel fitter: enable it with medium 3x3 filtering
 * and write the window position/size from the precomputed crtc state.
 */
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (crtc_state->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			/* IVB/HSW additionally select the pipe in PF_CTL. */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
	}
}
5832
/*
 * Enable IPS (Intermediate Pixel Storage) if the crtc state asks for it.
 * On BDW this goes through the pcode mailbox; on HSW through IPS_CTL
 * directly, followed by a wait for the enable bit to latch.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
5868
/*
 * Disable IPS if it was enabled in the crtc state: pcode mailbox on BDW,
 * direct IPS_CTL write on HSW, then wait a vblank before any plane can
 * be disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		/* Flush the write before waiting. */
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5895
5896 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5897 {
5898         if (intel_crtc->overlay)
5899                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5900
5901         /* Let userspace switch the overlay on again. In most cases userspace
5902          * has to recompute where to put it anyway.
5903          */
5904 }
5905
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 * @new_crtc_state: the enabling state
 *
 * Performs operations that must be done after the primary plane is enabled,
 * here re-enabling and checking FIFO underrun reporting. Note that this may
 * be called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc,
			  const struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
5940
/* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* IPS must be off before the primary plane goes away. */
	hsw_disable_ips(to_intel_crtc_state(crtc->state));

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);
}
5972
5973 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5974                                        const struct intel_crtc_state *new_crtc_state)
5975 {
5976         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5977         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5978
5979         if (!old_crtc_state->ips_enabled)
5980                 return false;
5981
5982         if (needs_modeset(new_crtc_state))
5983                 return true;
5984
5985         /*
5986          * Workaround : Do not read or write the pipe palette/gamma data while
5987          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5988          *
5989          * Disable IPS before we program the LUT.
5990          */
5991         if (IS_HASWELL(dev_priv) &&
5992             (new_crtc_state->uapi.color_mgmt_changed ||
5993              new_crtc_state->update_pipe) &&
5994             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5995                 return true;
5996
5997         return !new_crtc_state->ips_enabled;
5998 }
5999
6000 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6001                                        const struct intel_crtc_state *new_crtc_state)
6002 {
6003         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6004         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6005
6006         if (!new_crtc_state->ips_enabled)
6007                 return false;
6008
6009         if (needs_modeset(new_crtc_state))
6010                 return true;
6011
6012         /*
6013          * Workaround : Do not read or write the pipe palette/gamma data while
6014          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6015          *
6016          * Re-enable IPS after the LUT has been programmed.
6017          */
6018         if (IS_HASWELL(dev_priv) &&
6019             (new_crtc_state->uapi.color_mgmt_changed ||
6020              new_crtc_state->update_pipe) &&
6021             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6022                 return true;
6023
6024         /*
6025          * We can't read out IPS on broadwell, assume the worst and
6026          * forcibly enable IPS on the first fastset.
6027          */
6028         if (new_crtc_state->update_pipe &&
6029             old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
6030                 return true;
6031
6032         return !old_crtc_state->ips_enabled;
6033 }
6034
6035 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
6036                           const struct intel_crtc_state *crtc_state)
6037 {
6038         if (!crtc_state->nv12_planes)
6039                 return false;
6040
6041         /* WA Display #0827: Gen9:all */
6042         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6043                 return true;
6044
6045         return false;
6046 }
6047
6048 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
6049                                const struct intel_crtc_state *crtc_state)
6050 {
6051         /* Wa_2006604312:icl */
6052         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
6053                 return true;
6054
6055         return false;
6056 }
6057
/*
 * Post-plane-update housekeeping for one crtc: frontbuffer flip notification,
 * watermark update, IPS re-enable, FBC post-update, underrun re-enable via
 * intel_post_enable_primary(), and tear-down of the NV12 / scaler-clock-gating
 * workarounds once they are no longer needed.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	/* pipe_config is the *new* crtc state looked up from the atomic state. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	if (pipe_config->update_wm_post && pipe_config->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	if (old_primary_state) {
		struct drm_plane_state *new_primary_state =
			drm_atomic_get_new_plane_state(state, primary);

		intel_fbc_post_update(crtc);

		/* Primary plane newly visible (or modeset): run post-enable. */
		if (new_primary_state->visible &&
		    (needs_modeset(pipe_config) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}

	/* Disable workarounds that were needed before but not anymore. */
	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
	    !needs_nv12_wa(dev_priv, pipe_config))
		skl_wa_827(dev_priv, crtc->pipe, false);

	if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
	    !needs_scalerclk_wa(dev_priv, pipe_config))
		icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
}
6099
6100 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
6101                                    struct intel_crtc_state *pipe_config)
6102 {
6103         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6104         struct drm_device *dev = crtc->base.dev;
6105         struct drm_i915_private *dev_priv = to_i915(dev);
6106         struct drm_atomic_state *state = old_crtc_state->uapi.state;
6107         struct drm_plane *primary = crtc->base.primary;
6108         struct drm_plane_state *old_primary_state =
6109                 drm_atomic_get_old_plane_state(state, primary);
6110         bool modeset = needs_modeset(pipe_config);
6111         struct intel_atomic_state *intel_state =
6112                 to_intel_atomic_state(state);
6113
6114         if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
6115                 hsw_disable_ips(old_crtc_state);
6116
6117         if (old_primary_state) {
6118                 struct intel_plane_state *new_primary_state =
6119                         intel_atomic_get_new_plane_state(intel_state,
6120                                                          to_intel_plane(primary));
6121
6122                 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
6123                 /*
6124                  * Gen2 reports pipe underruns whenever all planes are disabled.
6125                  * So disable underrun reporting before all the planes get disabled.
6126                  */
6127                 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
6128                     (modeset || !new_primary_state->uapi.visible))
6129                         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
6130         }
6131
6132         /* Display WA 827 */
6133         if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
6134             needs_nv12_wa(dev_priv, pipe_config))
6135                 skl_wa_827(dev_priv, crtc->pipe, true);
6136
6137         /* Wa_2006604312:icl */
6138         if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
6139             needs_scalerclk_wa(dev_priv, pipe_config))
6140                 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
6141
6142         /*
6143          * Vblank time updates from the shadow to live plane control register
6144          * are blocked if the memory self-refresh mode is active at that
6145          * moment. So to make sure the plane gets truly disabled, disable
6146          * first the self-refresh mode. The self-refresh enable bit in turn
6147          * will be checked/applied by the HW only at the next frame start
6148          * event which is after the vblank start event, so we need to have a
6149          * wait-for-vblank between disabling the plane and the pipe.
6150          */
6151         if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
6152             pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
6153                 intel_wait_for_vblank(dev_priv, crtc->pipe);
6154
6155         /*
6156          * IVB workaround: must disable low power watermarks for at least
6157          * one frame before enabling scaling.  LP watermarks can be re-enabled
6158          * when scaling is disabled.
6159          *
6160          * WaCxSRDisabledForSpriteScaling:ivb
6161          */
6162         if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
6163             old_crtc_state->hw.active)
6164                 intel_wait_for_vblank(dev_priv, crtc->pipe);
6165
6166         /*
6167          * If we're doing a modeset, we're done.  No need to do any pre-vblank
6168          * watermark programming here.
6169          */
6170         if (needs_modeset(pipe_config))
6171                 return;
6172
6173         /*
6174          * For platforms that support atomic watermarks, program the
6175          * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
6176          * will be the intermediate values that are safe for both pre- and
6177          * post- vblank; when vblank happens, the 'active' values will be set
6178          * to the final 'target' values and we'll do this again to get the
6179          * optimal watermarks.  For gen9+ platforms, the values we program here
6180          * will be the final target values which will get automatically latched
6181          * at vblank time; no further programming will be necessary.
6182          *
6183          * If a platform hasn't been transitioned to atomic watermarks yet,
6184          * we'll continue to update watermarks the old way, if flags tell
6185          * us to.
6186          */
6187         if (dev_priv->display.initial_watermarks != NULL)
6188                 dev_priv->display.initial_watermarks(intel_state,
6189                                                      pipe_config);
6190         else if (pipe_config->update_wm_pre)
6191                 intel_update_watermarks(crtc);
6192 }
6193
6194 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6195                                       struct intel_crtc *crtc)
6196 {
6197         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6198         const struct intel_crtc_state *new_crtc_state =
6199                 intel_atomic_get_new_crtc_state(state, crtc);
6200         unsigned int update_mask = new_crtc_state->update_planes;
6201         const struct intel_plane_state *old_plane_state;
6202         struct intel_plane *plane;
6203         unsigned fb_bits = 0;
6204         int i;
6205
6206         intel_crtc_dpms_overlay_disable(crtc);
6207
6208         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6209                 if (crtc->pipe != plane->pipe ||
6210                     !(update_mask & BIT(plane->id)))
6211                         continue;
6212
6213                 intel_disable_plane(plane, new_crtc_state);
6214
6215                 if (old_plane_state->uapi.visible)
6216                         fb_bits |= plane->frontbuffer_bit;
6217         }
6218
6219         intel_frontbuffer_flip(dev_priv, fb_bits);
6220 }
6221
6222 /*
6223  * intel_connector_primary_encoder - get the primary encoder for a connector
6224  * @connector: connector for which to return the encoder
6225  *
6226  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6227  * all connectors to their encoder, except for DP-MST connectors which have
6228  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6229  * pointed to by as many DP-MST connectors as there are pipes.
6230  */
6231 static struct intel_encoder *
6232 intel_connector_primary_encoder(struct intel_connector *connector)
6233 {
6234         struct intel_encoder *encoder;
6235
6236         if (connector->mst_port)
6237                 return &dp_to_dig_port(connector->mst_port)->base;
6238
6239         encoder = intel_attached_encoder(&connector->base);
6240         WARN_ON(!encoder);
6241
6242         return encoder;
6243 }
6244
6245 static bool
6246 intel_connector_needs_modeset(struct intel_atomic_state *state,
6247                               const struct drm_connector_state *old_conn_state,
6248                               const struct drm_connector_state *new_conn_state)
6249 {
6250         struct intel_crtc *old_crtc = old_conn_state->crtc ?
6251                                       to_intel_crtc(old_conn_state->crtc) : NULL;
6252         struct intel_crtc *new_crtc = new_conn_state->crtc ?
6253                                       to_intel_crtc(new_conn_state->crtc) : NULL;
6254
6255         return new_crtc != old_crtc ||
6256                (new_crtc &&
6257                 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
6258 }
6259
6260 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6261 {
6262         struct drm_connector_state *old_conn_state;
6263         struct drm_connector_state *new_conn_state;
6264         struct drm_connector *conn;
6265         int i;
6266
6267         for_each_oldnew_connector_in_state(&state->base, conn,
6268                                            old_conn_state, new_conn_state, i) {
6269                 struct intel_encoder *encoder;
6270                 struct intel_crtc *crtc;
6271
6272                 if (!intel_connector_needs_modeset(state,
6273                                                    old_conn_state,
6274                                                    new_conn_state))
6275                         continue;
6276
6277                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6278                 if (!encoder->update_prepare)
6279                         continue;
6280
6281                 crtc = new_conn_state->crtc ?
6282                         to_intel_crtc(new_conn_state->crtc) : NULL;
6283                 encoder->update_prepare(state, encoder, crtc);
6284         }
6285 }
6286
6287 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6288 {
6289         struct drm_connector_state *old_conn_state;
6290         struct drm_connector_state *new_conn_state;
6291         struct drm_connector *conn;
6292         int i;
6293
6294         for_each_oldnew_connector_in_state(&state->base, conn,
6295                                            old_conn_state, new_conn_state, i) {
6296                 struct intel_encoder *encoder;
6297                 struct intel_crtc *crtc;
6298
6299                 if (!intel_connector_needs_modeset(state,
6300                                                    old_conn_state,
6301                                                    new_conn_state))
6302                         continue;
6303
6304                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6305                 if (!encoder->update_complete)
6306                         continue;
6307
6308                 crtc = new_conn_state->crtc ?
6309                         to_intel_crtc(new_conn_state->crtc) : NULL;
6310                 encoder->update_complete(state, encoder, crtc);
6311         }
6312 }
6313
6314 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6315                                           struct intel_crtc *crtc)
6316 {
6317         const struct intel_crtc_state *crtc_state =
6318                 intel_atomic_get_new_crtc_state(state, crtc);
6319         const struct drm_connector_state *conn_state;
6320         struct drm_connector *conn;
6321         int i;
6322
6323         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6324                 struct intel_encoder *encoder =
6325                         to_intel_encoder(conn_state->best_encoder);
6326
6327                 if (conn_state->crtc != &crtc->base)
6328                         continue;
6329
6330                 if (encoder->pre_pll_enable)
6331                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6332         }
6333 }
6334
6335 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6336                                       struct intel_crtc *crtc)
6337 {
6338         const struct intel_crtc_state *crtc_state =
6339                 intel_atomic_get_new_crtc_state(state, crtc);
6340         const struct drm_connector_state *conn_state;
6341         struct drm_connector *conn;
6342         int i;
6343
6344         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6345                 struct intel_encoder *encoder =
6346                         to_intel_encoder(conn_state->best_encoder);
6347
6348                 if (conn_state->crtc != &crtc->base)
6349                         continue;
6350
6351                 if (encoder->pre_enable)
6352                         encoder->pre_enable(encoder, crtc_state, conn_state);
6353         }
6354 }
6355
6356 static void intel_encoders_enable(struct intel_atomic_state *state,
6357                                   struct intel_crtc *crtc)
6358 {
6359         const struct intel_crtc_state *crtc_state =
6360                 intel_atomic_get_new_crtc_state(state, crtc);
6361         const struct drm_connector_state *conn_state;
6362         struct drm_connector *conn;
6363         int i;
6364
6365         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6366                 struct intel_encoder *encoder =
6367                         to_intel_encoder(conn_state->best_encoder);
6368
6369                 if (conn_state->crtc != &crtc->base)
6370                         continue;
6371
6372                 if (encoder->enable)
6373                         encoder->enable(encoder, crtc_state, conn_state);
6374                 intel_opregion_notify_encoder(encoder, true);
6375         }
6376 }
6377
6378 static void intel_encoders_disable(struct intel_atomic_state *state,
6379                                    struct intel_crtc *crtc)
6380 {
6381         const struct intel_crtc_state *old_crtc_state =
6382                 intel_atomic_get_old_crtc_state(state, crtc);
6383         const struct drm_connector_state *old_conn_state;
6384         struct drm_connector *conn;
6385         int i;
6386
6387         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6388                 struct intel_encoder *encoder =
6389                         to_intel_encoder(old_conn_state->best_encoder);
6390
6391                 if (old_conn_state->crtc != &crtc->base)
6392                         continue;
6393
6394                 intel_opregion_notify_encoder(encoder, false);
6395                 if (encoder->disable)
6396                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6397         }
6398 }
6399
6400 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6401                                         struct intel_crtc *crtc)
6402 {
6403         const struct intel_crtc_state *old_crtc_state =
6404                 intel_atomic_get_old_crtc_state(state, crtc);
6405         const struct drm_connector_state *old_conn_state;
6406         struct drm_connector *conn;
6407         int i;
6408
6409         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6410                 struct intel_encoder *encoder =
6411                         to_intel_encoder(old_conn_state->best_encoder);
6412
6413                 if (old_conn_state->crtc != &crtc->base)
6414                         continue;
6415
6416                 if (encoder->post_disable)
6417                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6418         }
6419 }
6420
6421 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6422                                             struct intel_crtc *crtc)
6423 {
6424         const struct intel_crtc_state *old_crtc_state =
6425                 intel_atomic_get_old_crtc_state(state, crtc);
6426         const struct drm_connector_state *old_conn_state;
6427         struct drm_connector *conn;
6428         int i;
6429
6430         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6431                 struct intel_encoder *encoder =
6432                         to_intel_encoder(old_conn_state->best_encoder);
6433
6434                 if (old_conn_state->crtc != &crtc->base)
6435                         continue;
6436
6437                 if (encoder->post_pll_disable)
6438                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6439         }
6440 }
6441
6442 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6443                                        struct intel_crtc *crtc)
6444 {
6445         const struct intel_crtc_state *crtc_state =
6446                 intel_atomic_get_new_crtc_state(state, crtc);
6447         const struct drm_connector_state *conn_state;
6448         struct drm_connector *conn;
6449         int i;
6450
6451         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6452                 struct intel_encoder *encoder =
6453                         to_intel_encoder(conn_state->best_encoder);
6454
6455                 if (conn_state->crtc != &crtc->base)
6456                         continue;
6457
6458                 if (encoder->update_pipe)
6459                         encoder->update_pipe(encoder, crtc_state, conn_state);
6460         }
6461 }
6462
6463 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6464 {
6465         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6466         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6467
6468         plane->disable_plane(plane, crtc_state);
6469 }
6470
/*
 * Full pipe enable sequence for PCH-era (ILK-style) hardware: program
 * timings, M/N values, pipeconf, pfit and LUTs, then enable the pipe
 * and, when present, the PCH encoder path. The step ordering follows
 * hardware requirements; do not reorder casually.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->uapi.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /* Enabling an already-active crtc would be a driver bug. */
        if (WARN_ON(intel_crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (pipe_config->has_pch_encoder)
                intel_prepare_shared_dpll(pipe_config);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        /* Timings and source size must be programmed before enabling. */
        intel_set_pipe_timings(pipe_config);
        intel_set_pipe_src_size(pipe_config);

        if (pipe_config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        ironlake_set_pipeconf(pipe_config);

        intel_crtc->active = true;

        intel_encoders_pre_enable(state, intel_crtc);

        if (pipe_config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ironlake_fdi_pll_enable(pipe_config);
        } else {
                /* No PCH encoder: FDI must not be running. */
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(pipe_config);

        /* Program initial watermarks before the pipe starts running. */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);
        intel_enable_pipe(pipe_config);

        if (pipe_config->has_pch_encoder)
                ironlake_pch_enable(state, pipe_config);

        /* Pipe is running now; turn on vblank handling. */
        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(state, intel_crtc);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev, intel_crtc->pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (pipe_config->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
        /* Re-enable the underrun reporting suppressed above. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6565
6566 /* IPS only exists on ULT machines and is tied to pipe A. */
6567 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6568 {
6569         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6570 }
6571
6572 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6573                                             enum pipe pipe, bool apply)
6574 {
6575         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6576         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6577
6578         if (apply)
6579                 val |= mask;
6580         else
6581                 val &= ~mask;
6582
6583         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6584 }
6585
6586 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6587 {
6588         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6589         enum pipe pipe = crtc->pipe;
6590         u32 val;
6591
6592         val = MBUS_DBOX_A_CREDIT(2);
6593
6594         if (INTEL_GEN(dev_priv) >= 12) {
6595                 val |= MBUS_DBOX_BW_CREDIT(2);
6596                 val |= MBUS_DBOX_B_CREDIT(12);
6597         } else {
6598                 val |= MBUS_DBOX_BW_CREDIT(1);
6599                 val |= MBUS_DBOX_B_CREDIT(8);
6600         }
6601
6602         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6603 }
6604
6605 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6606 {
6607         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6608         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6609         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6610         u32 val;
6611
6612         val = I915_READ(reg);
6613         val &= ~HSW_FRAME_START_DELAY_MASK;
6614         val |= HSW_FRAME_START_DELAY(0);
6615         I915_WRITE(reg, val);
6616 }
6617
/*
 * Full pipe enable sequence for HSW+ hardware: PLL and encoder
 * pre-enable hooks, transcoder/pipe programming, pfit/scaler, LUTs,
 * watermarks, then pipe enable and encoder enable hooks, followed by
 * the GLK/CNL scaler clock gating and HSW workaround waits. The step
 * ordering follows hardware requirements; do not reorder casually.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                                struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->uapi.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        bool psl_clkgate_wa;

        /* Enabling an already-active crtc would be a driver bug. */
        if (WARN_ON(intel_crtc->active))
                return;

        intel_encoders_pre_pll_enable(state, intel_crtc);

        if (pipe_config->shared_dpll)
                intel_enable_shared_dpll(pipe_config);

        intel_encoders_pre_enable(state, intel_crtc);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        /* DSI transcoders program their own timings elsewhere. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_enable_trans_port_sync(pipe_config);

        intel_set_pipe_src_size(pipe_config);

        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder)) {
                I915_WRITE(PIPE_MULT(cpu_transcoder),
                           pipe_config->pixel_multiplier - 1);
        }

        if (pipe_config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        if (!transcoder_is_dsi(cpu_transcoder)) {
                hsw_set_frame_start_delay(pipe_config);
                haswell_set_pipeconf(pipe_config);
        }

        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipemisc(pipe_config);

        intel_crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                         pipe_config->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        /* Gen9+ uses the SKL scaler; older hw uses the ILK-style pfit. */
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_pfit_enable(pipe_config);
        else
                ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (INTEL_GEN(dev_priv) < 9)
                intel_disable_primary_plane(pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(intel_crtc);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(pipe_config);

        /* Program initial watermarks before the pipe starts running. */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_pipe_mbus_enable(intel_crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(pipe_config);

        if (pipe_config->has_pch_encoder)
                lpt_pch_enable(state, pipe_config);

        /* Pipe is running now; turn on vblank handling. */
        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(state, intel_crtc);

        /* Undo WA #1180 once a vblank has passed. */
        if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
        }
}
6728
6729 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6730 {
6731         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6732         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6733         enum pipe pipe = crtc->pipe;
6734
6735         /* To avoid upsetting the power well on haswell only disable the pfit if
6736          * it's in use. The hw state code will make sure we get this right. */
6737         if (old_crtc_state->pch_pfit.enabled) {
6738                 I915_WRITE(PF_CTL(pipe), 0);
6739                 I915_WRITE(PF_WIN_POS(pipe), 0);
6740                 I915_WRITE(PF_WIN_SZ(pipe), 0);
6741         }
6742 }
6743
/*
 * Full pipe disable sequence for PCH-era (ILK-style) hardware: encoder
 * disable hooks, vblank off, pipe off, pfit and FDI teardown, then the
 * PCH transcoder and related register cleanup. The step ordering
 * follows hardware requirements; do not reorder casually.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                  struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(state, intel_crtc);

        /* vblank handling must be off before the pipe goes down */
        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(old_crtc_state);

        ironlake_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ironlake_fdi_disable(crtc);

        intel_encoders_post_disable(state, intel_crtc);

        if (old_crtc_state->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);

                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = I915_READ(reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        I915_WRITE(reg, temp);

                        /* disable DPLL_SEL */
                        temp = I915_READ(PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }

                ironlake_fdi_pll_disable(intel_crtc);
        }

        /* Re-enable the underrun reporting suppressed above. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6802
/*
 * Full pipe disable sequence for HSW+ hardware: encoder disable hooks,
 * vblank off, pipe off, transcoder/port-sync/DSC/scaler teardown, then
 * the post-disable and post-pll-disable encoder hooks. The step
 * ordering follows hardware requirements; do not reorder casually.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

        intel_encoders_disable(state, intel_crtc);

        /* vblank handling must be off before the pipe goes down */
        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(old_crtc_state);

        /* Transcoder port sync only exists on gen11+. */
        if (INTEL_GEN(dev_priv) >= 11)
                icl_disable_transcoder_port_sync(old_crtc_state);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(old_crtc_state);

        intel_dsc_disable(old_crtc_state);

        /* Gen9+ uses the SKL scaler; older hw uses the ILK-style pfit. */
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
                ironlake_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(state, intel_crtc);

        intel_encoders_post_pll_disable(state, intel_crtc);
}
6837
6838 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6839 {
6840         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6841         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6842
6843         if (!crtc_state->gmch_pfit.control)
6844                 return;
6845
6846         /*
6847          * The panel fitter should only be adjusted whilst the pipe is disabled,
6848          * according to register description and PRM.
6849          */
6850         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6851         assert_pipe_disabled(dev_priv, crtc->pipe);
6852
6853         I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6854         I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6855
6856         /* Border color in case we don't scale up to the full screen. Black by
6857          * default, change to something else for debugging. */
6858         I915_WRITE(BCLRPAT(crtc->pipe), 0);
6859 }
6860
6861 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6862 {
6863         if (phy == PHY_NONE)
6864                 return false;
6865
6866         if (IS_ELKHARTLAKE(dev_priv))
6867                 return phy <= PHY_C;
6868
6869         if (INTEL_GEN(dev_priv) >= 11)
6870                 return phy <= PHY_B;
6871
6872         return false;
6873 }
6874
6875 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6876 {
6877         if (INTEL_GEN(dev_priv) >= 12)
6878                 return phy >= PHY_D && phy <= PHY_I;
6879
6880         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6881                 return phy >= PHY_C && phy <= PHY_F;
6882
6883         return false;
6884 }
6885
6886 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6887 {
6888         if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6889                 return PHY_A;
6890
6891         return (enum phy)port;
6892 }
6893
6894 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6895 {
6896         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6897                 return PORT_TC_NONE;
6898
6899         if (INTEL_GEN(dev_priv) >= 12)
6900                 return port - PORT_D;
6901
6902         return port - PORT_C;
6903 }
6904
/*
 * Map a DDI port to the display power domain powering its lanes.
 *
 * Unknown ports are flagged via MISSING_CASE() and fall back to
 * POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
6927
/*
 * Return the power domain required to use @dig_port's AUX channel.
 *
 * A Type-C port currently in Thunderbolt-alt mode uses the dedicated
 * *_TBT AUX domains; all other ports use the regular AUX domains.
 * Unexpected AUX channels are flagged via MISSING_CASE() with a
 * safe fallback domain.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
6973
/*
 * Compute the bitmask of display power domains needed by @crtc_state.
 *
 * Returns 0 for an inactive crtc. Otherwise the mask covers the pipe
 * itself, its transcoder, the panel fitter (if used or forced on),
 * every attached encoder's domain, audio (on DDI platforms), and the
 * display core when a shared DPLL is in use.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Each encoder feeding this crtc contributes its own domain. */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	return mask;
}
7007
7008 static u64
7009 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7010 {
7011         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7012         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7013         enum intel_display_power_domain domain;
7014         u64 domains, new_domains, old_domains;
7015
7016         old_domains = crtc->enabled_power_domains;
7017         crtc->enabled_power_domains = new_domains =
7018                 get_crtc_power_domains(crtc_state);
7019
7020         domains = new_domains & ~old_domains;
7021
7022         for_each_power_domain(domain, domains)
7023                 intel_display_power_get(dev_priv, domain);
7024
7025         return old_domains & ~new_domains;
7026 }
7027
/*
 * Release references on every power domain in @domains, previously
 * acquired via modeset_get_crtc_power_domains().
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}
7036
/*
 * Full crtc enable sequence for VLV/CHV pipes.
 *
 * The ordering below follows the hardware modeset sequence: pipe
 * timings and config are programmed first, then the PLL is prepared
 * and enabled with encoder hooks interleaved, and finally the pipe
 * is enabled and vblanks turned on before the encoders.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* CHV pipe B: use legacy blending, black canvas color. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, intel_crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, pipe_config);
		chv_enable_pll(intel_crtc, pipe_config);
	} else {
		vlv_prepare_pll(intel_crtc, pipe_config);
		vlv_enable_pll(intel_crtc, pipe_config);
	}

	intel_encoders_pre_enable(state, intel_crtc);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	dev_priv->display.initial_watermarks(state, pipe_config);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(state, intel_crtc);
}
7093
/*
 * Program the precomputed FP0/FP1 PLL divider values for this pipe.
 */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
7102
/*
 * Full crtc enable sequence for gen2-4 (non-VLV/CHV GMCH) pipes.
 *
 * Ordering mirrors the hardware modeset sequence: PLL dividers and
 * pipe config first, then PLL enable, panel fitter, color management,
 * watermarks and finally pipe/encoder enable.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, intel_crtc);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	/* Not all platforms provide an initial_watermarks hook. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(state, intel_crtc);
}
7153
/*
 * Turn off the GMCH panel fitter, if it was in use.
 *
 * The pipe must already be disabled; the pfit cannot be reprogrammed
 * while the pipe is running.
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
		      I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
7168
/*
 * Full crtc disable sequence for GMCH (gen2-4, VLV, CHV) pipes:
 * encoders, vblanks, pipe, pfit, then the PLL, with FIFO underrun
 * reporting and watermarks cleaned up at the end.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, intel_crtc);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, intel_crtc);

	/* DSI owns its own PLL; don't touch it here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, intel_crtc);

	/* gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7217
/*
 * Force a crtc off outside of a normal atomic commit (e.g. during hw
 * state sanitization at boot/resume), then scrub all software state
 * to match: crtc/encoder state pointers, FBC, watermarks, shared
 * DPLLs, power domain refs, and the cdclk/voltage/bandwidth
 * bookkeeping for the pipe.
 *
 * All relevant locks must already be held via @ctx.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Planes must be torn down before the pipe itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/*
	 * A throwaway atomic state is needed only to drive the
	 * crtc_disable() hook; it is never committed.
	 */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(temp_crtc_state, to_intel_atomic_state(state));

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Scrub the drm-level and intel-level crtc state. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Drop the power domain references the crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;

	bw_state->data_rate[intel_crtc->pipe] = 0;
	bw_state->num_active_planes[intel_crtc->pipe] = 0;
}
7296
7297 /*
7298  * turn all crtc's off, but do not adjust state
7299  * This has to be paired with a call to intel_modeset_setup_hw_state.
7300  */
7301 int intel_display_suspend(struct drm_device *dev)
7302 {
7303         struct drm_i915_private *dev_priv = to_i915(dev);
7304         struct drm_atomic_state *state;
7305         int ret;
7306
7307         state = drm_atomic_helper_suspend(dev);
7308         ret = PTR_ERR_OR_ZERO(state);
7309         if (ret)
7310                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7311         else
7312                 dev_priv->modeset_restore_state = state;
7313         return ret;
7314 }
7315
/*
 * Generic encoder destroy hook: tear down the drm encoder and free
 * the containing intel_encoder.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
7323
/*
 * Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency): a hw-enabled connector must have an active
 * attached crtc and a matching encoder, a hw-disabled one must not.
 * Mismatches only produce I915_STATE_WARNs; nothing is fixed up here.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST encoders are bound to a crtc, not to a connector. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7362
7363 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7364 {
7365         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7366                 return crtc_state->fdi_lanes;
7367
7368         return 0;
7369 }
7370
/*
 * Validate the requested FDI lane count for @pipe against per-platform
 * limits and against lanes already claimed by other pipes.
 *
 * Returns 0 on success, -EINVAL if the config can't fit, or the error
 * from acquiring another crtc's state (e.g. -EDEADLK).
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B >2 lanes steals pipe C's lanes; C must be idle. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is capped at 2 lanes and only works if B uses <= 2. */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7442
#define RETRY 1
/*
 * Compute the FDI link configuration (lane count and M/N values) for
 * @pipe_config, reducing pipe bpp in steps of 2 bits/component when the
 * link can't carry the requested mode at the current bpp.
 *
 * Returns 0 on success, RETRY when the caller must recompute the whole
 * config with the reduced bpp, or a negative error code.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	/* Try again at a lower bpp, but not below 6 bits/component. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7491
/*
 * Report whether @crtc_state could use IPS (Intermediate Pixel
 * Storage) at all; whether it actually should is decided by
 * hsw_compute_ips_config().
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	/* IPS can be disabled via module parameter. */
	if (!i915_modparams.enable_ips)
		return false;

	/* IPS doesn't support more than 24bpp. */
	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
7520
/*
 * Decide whether IPS should actually be enabled for @crtc_state,
 * given the current atomic state (cdclk, CRC capture, plane usage).
 */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->uapi.state);

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return false;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return false;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return false;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
		return false;

	return true;
}
7551
7552 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7553 {
7554         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7555
7556         /* GDG double wide on either pipe, otherwise pipe A only */
7557         return INTEL_GEN(dev_priv) < 4 &&
7558                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7559 }
7560
/*
 * Compute the effective pipe pixel rate on ILK+, scaling the dotclock
 * up when the PCH panel fitter downscales (the pipe then has to
 * produce pixels faster than the output consumes them).
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	u32 pixel_rate;

	pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (pipe_config->pch_pfit.enabled) {
		u64 pipe_w, pipe_h, pfit_w, pfit_h;
		/* pch_pfit.size packs width in the high 16 bits, height in the low. */
		u32 pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		/* Only downscaling increases the rate; clamp to the pfit size. */
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		/* Scale by the source/destination area ratio, in 64 bits. */
		pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
7595
7596 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7597 {
7598         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7599
7600         if (HAS_GMCH(dev_priv))
7601                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7602                 crtc_state->pixel_rate =
7603                         crtc_state->hw.adjusted_mode.crtc_clock;
7604         else
7605                 crtc_state->pixel_rate =
7606                         ilk_pipe_pixel_rate(crtc_state);
7607 }
7608
/*
 * Validate and finalize the crtc-level parts of @pipe_config:
 * dotclock limits (including pre-gen4 double wide mode), YCbCr/CTM
 * interaction, source-width parity constraints, the hsync front
 * porch erratum, the pixel rate, and — for PCH encoders — the FDI
 * configuration.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
7682
7683 static void
7684 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7685 {
7686         while (*num > DATA_LINK_M_N_MASK ||
7687                *den > DATA_LINK_M_N_MASK) {
7688                 *num >>= 1;
7689                 *den >>= 1;
7690         }
7691 }
7692
/*
 * Compute a hardware M/N pair approximating the ratio m/n, optionally
 * pinning N to the fixed value some sink devices require.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Give N value as 0x8000 that
	 * should be acceptable by specific devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices expect also in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = 0x8000;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	/* Scale M to the chosen N with 64-bit intermediate precision. */
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
7712
7713 void
7714 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7715                        int pixel_clock, int link_clock,
7716                        struct intel_link_m_n *m_n,
7717                        bool constant_n, bool fec_enable)
7718 {
7719         u32 data_clock = bits_per_pixel * pixel_clock;
7720
7721         if (fec_enable)
7722                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
7723
7724         m_n->tu = 64;
7725         compute_m_n(data_clock,
7726                     link_clock * nlanes * 8,
7727                     &m_n->gmch_m, &m_n->gmch_n,
7728                     constant_n);
7729
7730         compute_m_n(pixel_clock, link_clock,
7731                     &m_n->link_m, &m_n->link_n,
7732                     constant_n);
7733 }
7734
7735 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
7736 {
7737         /*
7738          * There may be no VBT; and if the BIOS enabled SSC we can
7739          * just keep using it to avoid unnecessary flicker.  Whereas if the
7740          * BIOS isn't using it, don't assume it will work even if the VBT
7741          * indicates as much.
7742          */
7743         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
7744                 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
7745                         DREF_SSC1_ENABLE;
7746
7747                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
7748                         DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
7749                                       enableddisabled(bios_lvds_use_ssc),
7750                                       enableddisabled(dev_priv->vbt.lvds_use_ssc));
7751                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
7752                 }
7753         }
7754 }
7755
7756 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7757 {
7758         if (i915_modparams.panel_use_ssc >= 0)
7759                 return i915_modparams.panel_use_ssc != 0;
7760         return dev_priv->vbt.lvds_use_ssc
7761                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7762 }
7763
7764 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7765 {
7766         return (1 << dpll->n) << 16 | dpll->m2;
7767 }
7768
7769 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7770 {
7771         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7772 }
7773
7774 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7775                                      struct intel_crtc_state *crtc_state,
7776                                      struct dpll *reduced_clock)
7777 {
7778         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7779         u32 fp, fp2 = 0;
7780
7781         if (IS_PINEVIEW(dev_priv)) {
7782                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7783                 if (reduced_clock)
7784                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7785         } else {
7786                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7787                 if (reduced_clock)
7788                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7789         }
7790
7791         crtc_state->dpll_hw_state.fp0 = fp;
7792
7793         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7794             reduced_clock) {
7795                 crtc_state->dpll_hw_state.fp1 = fp2;
7796         } else {
7797                 crtc_state->dpll_hw_state.fp1 = fp;
7798         }
7799 }
7800
/*
 * Recalibrate the PLL B opamp via DPIO read-modify-write cycles.
 * Called from vlv_prepare_pll() for pipe B, which holds the DPIO lock
 * (vlv_dpio_get()) around the call. The exact register values are
 * magic tuning numbers; presumably from the DPIO programming notes —
 * do not change without hardware documentation.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* Update the high byte of VLV_REF_DW13 to start the recal. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the forced value written above. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* Write the final high-byte value to VLV_REF_DW13. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7829
/*
 * Program the PCH transcoder data and link M/N (M1/N1) registers for
 * the pipe driven by @crtc_state. The TU size is folded into the high
 * bits of the data M register via TU_SIZE().
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7842
7843 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7844                                  enum transcoder transcoder)
7845 {
7846         if (IS_HASWELL(dev_priv))
7847                 return transcoder == TRANSCODER_EDP;
7848
7849         /*
7850          * Strictly speaking some registers are available before
7851          * gen7, but we only support DRRS on gen7+
7852          */
7853         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7854 }
7855
/*
 * Program the CPU transcoder data/link M/N registers.
 *
 * On gen5+ the per-transcoder M1/N1 registers are written; the M2/N2
 * set is additionally written only when @m2_n2 is supplied, DRRS is
 * enabled in the state, and the transcoder actually has those
 * registers (transcoder_has_m2_n2()). Pre-gen5 (G4X) uses the
 * per-pipe register variants and has no M2/N2 set.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n,
                                         const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* Pre-ILK: per-pipe (G4X) register variants. */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7889
7890 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7891 {
7892         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7893
7894         if (m_n == M1_N1) {
7895                 dp_m_n = &crtc_state->dp_m_n;
7896                 dp_m2_n2 = &crtc_state->dp_m2_n2;
7897         } else if (m_n == M2_N2) {
7898
7899                 /*
7900                  * M2_N2 registers are not supported. Hence m2_n2 divider value
7901                  * needs to be programmed into M1_N1.
7902                  */
7903                 dp_m_n = &crtc_state->dp_m2_n2;
7904         } else {
7905                 DRM_ERROR("Unsupported divider value\n");
7906                 return;
7907         }
7908
7909         if (crtc_state->has_pch_encoder)
7910                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7911         else
7912                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7913 }
7914
7915 static void vlv_compute_dpll(struct intel_crtc *crtc,
7916                              struct intel_crtc_state *pipe_config)
7917 {
7918         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7919                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7920         if (crtc->pipe != PIPE_A)
7921                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7922
7923         /* DPLL not used with DSI, but still need the rest set up */
7924         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7925                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7926                         DPLL_EXT_BUFFER_ENABLE_VLV;
7927
7928         pipe_config->dpll_hw_state.dpll_md =
7929                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7930 }
7931
7932 static void chv_compute_dpll(struct intel_crtc *crtc,
7933                              struct intel_crtc_state *pipe_config)
7934 {
7935         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7936                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7937         if (crtc->pipe != PIPE_A)
7938                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7939
7940         /* DPLL not used with DSI, but still need the rest set up */
7941         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7942                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7943
7944         pipe_config->dpll_hw_state.dpll_md =
7945                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7946 }
7947
/*
 * Program the VLV DPLL dividers and analog tuning values over DPIO.
 * Only the refclk bits are written when the DPLL is not going to be
 * enabled (the DSI case); the PLL itself is turned on later by
 * vlv_enable_pll(). The DPIO write order and the magic tuning values
 * follow the "eDP HDMI DPIO driver vbios notes" referenced below —
 * do not reorder without hardware documentation.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers written: now turn on calibration with a second write. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock tuning; extra bit set for DP encoders. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
8047
/*
 * Program the CHV DPLL dividers, lock-detect threshold and loop filter
 * over DPIO. As on VLV, only the refclk/SSC bits are written when the
 * DPLL is not going to be enabled (the DSI case); the PLL itself is
 * turned on later by chv_enable_pll(). The loop filter coefficients
 * are selected by VCO frequency band.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* M2 is split: fractional part in the low 22 bits, integer above. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* Coarse lock detect only when no fractional part is in use. */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count selected alongside the loop filter above. */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8152
8153 /**
8154  * vlv_force_pll_on - forcibly enable just the PLL
8155  * @dev_priv: i915 private structure
8156  * @pipe: pipe PLL to enable
8157  * @dpll: PLL configuration
8158  *
8159  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8160  * in cases where we need the PLL enabled even when @pipe is not going to
8161  * be enabled.
8162  */
8163 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8164                      const struct dpll *dpll)
8165 {
8166         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8167         struct intel_crtc_state *pipe_config;
8168
8169         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8170         if (!pipe_config)
8171                 return -ENOMEM;
8172
8173         pipe_config->uapi.crtc = &crtc->base;
8174         pipe_config->pixel_multiplier = 1;
8175         pipe_config->dpll = *dpll;
8176
8177         if (IS_CHERRYVIEW(dev_priv)) {
8178                 chv_compute_dpll(crtc, pipe_config);
8179                 chv_prepare_pll(crtc, pipe_config);
8180                 chv_enable_pll(crtc, pipe_config);
8181         } else {
8182                 vlv_compute_dpll(crtc, pipe_config);
8183                 vlv_prepare_pll(crtc, pipe_config);
8184                 vlv_enable_pll(crtc, pipe_config);
8185         }
8186
8187         kfree(pipe_config);
8188
8189         return 0;
8190 }
8191
8192 /**
8193  * vlv_force_pll_off - forcibly disable just the PLL
8194  * @dev_priv: i915 private structure
8195  * @pipe: pipe PLL to disable
8196  *
8197  * Disable the PLL for @pipe. To be used in cases where we need
8198  * the PLL enabled even when @pipe is not going to be enabled.
8199  */
8200 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8201 {
8202         if (IS_CHERRYVIEW(dev_priv))
8203                 chv_disable_pll(dev_priv, pipe);
8204         else
8205                 vlv_disable_pll(dev_priv, pipe);
8206 }
8207
/*
 * Compute the gen3/4 (i9xx-style) DPLL control register value from the
 * divider state in @crtc_state and store it (together with the FP
 * dividers via i9xx_update_pll_dividers() and, on gen4+, DPLL_MD) into
 * crtc_state->dpll_hw_state. Does not touch the hardware.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
                              struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These platforms carry the pixel multiplier in the DPLL itself. */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	/* High speed mode for SDVO/HDMI and for DP encoders alike. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* G4X also encodes the reduced-clock P1 for downclocking. */
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC for LVDS, or DREFCLK. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* Gen4+ keeps the pixel multiplier in the separate DPLL_MD register. */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8280
/*
 * Compute the gen2 (i8xx-style) DPLL control register value from the
 * divider state in @crtc_state and store it (together with the FP
 * dividers via i9xx_update_pll_dividers()) into
 * crtc_state->dpll_hw_state. Does not touch the hardware.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
                              struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* LVDS uses a one-hot P1 encoding; others use P1-2 plus flag bits. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* Reference clock: SSC for LVDS panels that use it, else DREFCLK. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8330
/*
 * Write the pipe/transcoder timing registers (H/V total, blank, sync,
 * and VSYNCSHIFT) from the adjusted mode in @crtc_state. Interlaced
 * modes get their vtotal/vblank_end adjusted locally and a computed
 * vsyncshift; the adjusted mode itself is never modified. All register
 * fields are programmed as value-minus-one per the hardware convention
 * visible in the writes below.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Keep vsyncshift non-negative by wrapping into the htotal. */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
8392
8393 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8394 {
8395         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8396         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8397         enum pipe pipe = crtc->pipe;
8398
8399         /* pipesrc controls the size that is scaled from, which should
8400          * always be the user's requested size.
8401          */
8402         I915_WRITE(PIPESRC(pipe),
8403                    ((crtc_state->pipe_src_w - 1) << 16) |
8404                    (crtc_state->pipe_src_h - 1));
8405 }
8406
8407 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8408 {
8409         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8410         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8411
8412         if (IS_GEN(dev_priv, 2))
8413                 return false;
8414
8415         if (INTEL_GEN(dev_priv) >= 9 ||
8416             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8417                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8418         else
8419                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8420 }
8421
8422 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8423                                    struct intel_crtc_state *pipe_config)
8424 {
8425         struct drm_device *dev = crtc->base.dev;
8426         struct drm_i915_private *dev_priv = to_i915(dev);
8427         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8428         u32 tmp;
8429
8430         tmp = I915_READ(HTOTAL(cpu_transcoder));
8431         pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8432         pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8433
8434         if (!transcoder_is_dsi(cpu_transcoder)) {
8435                 tmp = I915_READ(HBLANK(cpu_transcoder));
8436                 pipe_config->hw.adjusted_mode.crtc_hblank_start =
8437                                                         (tmp & 0xffff) + 1;
8438                 pipe_config->hw.adjusted_mode.crtc_hblank_end =
8439                                                 ((tmp >> 16) & 0xffff) + 1;
8440         }
8441         tmp = I915_READ(HSYNC(cpu_transcoder));
8442         pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8443         pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8444
8445         tmp = I915_READ(VTOTAL(cpu_transcoder));
8446         pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8447         pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8448
8449         if (!transcoder_is_dsi(cpu_transcoder)) {
8450                 tmp = I915_READ(VBLANK(cpu_transcoder));
8451                 pipe_config->hw.adjusted_mode.crtc_vblank_start =
8452                                                         (tmp & 0xffff) + 1;
8453                 pipe_config->hw.adjusted_mode.crtc_vblank_end =
8454                                                 ((tmp >> 16) & 0xffff) + 1;
8455         }
8456         tmp = I915_READ(VSYNC(cpu_transcoder));
8457         pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8458         pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8459
8460         if (intel_pipe_is_interlaced(pipe_config)) {
8461                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8462                 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
8463                 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
8464         }
8465 }
8466
8467 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8468                                     struct intel_crtc_state *pipe_config)
8469 {
8470         struct drm_device *dev = crtc->base.dev;
8471         struct drm_i915_private *dev_priv = to_i915(dev);
8472         u32 tmp;
8473
8474         tmp = I915_READ(PIPESRC(crtc->pipe));
8475         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8476         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8477
8478         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8479         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8480 }
8481
8482 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8483                                  struct intel_crtc_state *pipe_config)
8484 {
8485         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8486         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8487         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8488         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8489
8490         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8491         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8492         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8493         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8494
8495         mode->flags = pipe_config->hw.adjusted_mode.flags;
8496         mode->type = DRM_MODE_TYPE_DRIVER;
8497
8498         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8499
8500         mode->hsync = drm_mode_hsync(mode);
8501         mode->vrefresh = drm_mode_vrefresh(mode);
8502         drm_mode_set_name(mode);
8503 }
8504
8505 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8506 {
8507         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8508         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8509         u32 pipeconf;
8510
8511         pipeconf = 0;
8512
8513         /* we keep both pipes enabled on 830 */
8514         if (IS_I830(dev_priv))
8515                 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8516
8517         if (crtc_state->double_wide)
8518                 pipeconf |= PIPECONF_DOUBLE_WIDE;
8519
8520         /* only g4x and later have fancy bpc/dither controls */
8521         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8522             IS_CHERRYVIEW(dev_priv)) {
8523                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8524                 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8525                         pipeconf |= PIPECONF_DITHER_EN |
8526                                     PIPECONF_DITHER_TYPE_SP;
8527
8528                 switch (crtc_state->pipe_bpp) {
8529                 case 18:
8530                         pipeconf |= PIPECONF_6BPC;
8531                         break;
8532                 case 24:
8533                         pipeconf |= PIPECONF_8BPC;
8534                         break;
8535                 case 30:
8536                         pipeconf |= PIPECONF_10BPC;
8537                         break;
8538                 default:
8539                         /* Case prevented by intel_choose_pipe_bpp_dither. */
8540                         BUG();
8541                 }
8542         }
8543
8544         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8545                 if (INTEL_GEN(dev_priv) < 4 ||
8546                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8547                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8548                 else
8549                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8550         } else {
8551                 pipeconf |= PIPECONF_PROGRESSIVE;
8552         }
8553
8554         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8555              crtc_state->limited_color_range)
8556                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8557
8558         pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8559
8560         pipeconf |= PIPECONF_FRAME_START_DELAY(0);
8561
8562         I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8563         POSTING_READ(PIPECONF(crtc->pipe));
8564 }
8565
8566 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8567                                    struct intel_crtc_state *crtc_state)
8568 {
8569         struct drm_device *dev = crtc->base.dev;
8570         struct drm_i915_private *dev_priv = to_i915(dev);
8571         const struct intel_limit *limit;
8572         int refclk = 48000;
8573
8574         memset(&crtc_state->dpll_hw_state, 0,
8575                sizeof(crtc_state->dpll_hw_state));
8576
8577         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8578                 if (intel_panel_use_ssc(dev_priv)) {
8579                         refclk = dev_priv->vbt.lvds_ssc_freq;
8580                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8581                 }
8582
8583                 limit = &intel_limits_i8xx_lvds;
8584         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8585                 limit = &intel_limits_i8xx_dvo;
8586         } else {
8587                 limit = &intel_limits_i8xx_dac;
8588         }
8589
8590         if (!crtc_state->clock_set &&
8591             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8592                                  refclk, NULL, &crtc_state->dpll)) {
8593                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8594                 return -EINVAL;
8595         }
8596
8597         i8xx_compute_dpll(crtc, crtc_state, NULL);
8598
8599         return 0;
8600 }
8601
8602 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8603                                   struct intel_crtc_state *crtc_state)
8604 {
8605         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8606         const struct intel_limit *limit;
8607         int refclk = 96000;
8608
8609         memset(&crtc_state->dpll_hw_state, 0,
8610                sizeof(crtc_state->dpll_hw_state));
8611
8612         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8613                 if (intel_panel_use_ssc(dev_priv)) {
8614                         refclk = dev_priv->vbt.lvds_ssc_freq;
8615                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8616                 }
8617
8618                 if (intel_is_dual_link_lvds(dev_priv))
8619                         limit = &intel_limits_g4x_dual_channel_lvds;
8620                 else
8621                         limit = &intel_limits_g4x_single_channel_lvds;
8622         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8623                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8624                 limit = &intel_limits_g4x_hdmi;
8625         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8626                 limit = &intel_limits_g4x_sdvo;
8627         } else {
8628                 /* The option is for other outputs */
8629                 limit = &intel_limits_i9xx_sdvo;
8630         }
8631
8632         if (!crtc_state->clock_set &&
8633             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8634                                 refclk, NULL, &crtc_state->dpll)) {
8635                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8636                 return -EINVAL;
8637         }
8638
8639         i9xx_compute_dpll(crtc, crtc_state, NULL);
8640
8641         return 0;
8642 }
8643
8644 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8645                                   struct intel_crtc_state *crtc_state)
8646 {
8647         struct drm_device *dev = crtc->base.dev;
8648         struct drm_i915_private *dev_priv = to_i915(dev);
8649         const struct intel_limit *limit;
8650         int refclk = 96000;
8651
8652         memset(&crtc_state->dpll_hw_state, 0,
8653                sizeof(crtc_state->dpll_hw_state));
8654
8655         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8656                 if (intel_panel_use_ssc(dev_priv)) {
8657                         refclk = dev_priv->vbt.lvds_ssc_freq;
8658                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8659                 }
8660
8661                 limit = &intel_limits_pineview_lvds;
8662         } else {
8663                 limit = &intel_limits_pineview_sdvo;
8664         }
8665
8666         if (!crtc_state->clock_set &&
8667             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8668                                 refclk, NULL, &crtc_state->dpll)) {
8669                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8670                 return -EINVAL;
8671         }
8672
8673         i9xx_compute_dpll(crtc, crtc_state, NULL);
8674
8675         return 0;
8676 }
8677
8678 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8679                                    struct intel_crtc_state *crtc_state)
8680 {
8681         struct drm_device *dev = crtc->base.dev;
8682         struct drm_i915_private *dev_priv = to_i915(dev);
8683         const struct intel_limit *limit;
8684         int refclk = 96000;
8685
8686         memset(&crtc_state->dpll_hw_state, 0,
8687                sizeof(crtc_state->dpll_hw_state));
8688
8689         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8690                 if (intel_panel_use_ssc(dev_priv)) {
8691                         refclk = dev_priv->vbt.lvds_ssc_freq;
8692                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8693                 }
8694
8695                 limit = &intel_limits_i9xx_lvds;
8696         } else {
8697                 limit = &intel_limits_i9xx_sdvo;
8698         }
8699
8700         if (!crtc_state->clock_set &&
8701             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8702                                  refclk, NULL, &crtc_state->dpll)) {
8703                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8704                 return -EINVAL;
8705         }
8706
8707         i9xx_compute_dpll(crtc, crtc_state, NULL);
8708
8709         return 0;
8710 }
8711
8712 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8713                                   struct intel_crtc_state *crtc_state)
8714 {
8715         int refclk = 100000;
8716         const struct intel_limit *limit = &intel_limits_chv;
8717
8718         memset(&crtc_state->dpll_hw_state, 0,
8719                sizeof(crtc_state->dpll_hw_state));
8720
8721         if (!crtc_state->clock_set &&
8722             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8723                                 refclk, NULL, &crtc_state->dpll)) {
8724                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8725                 return -EINVAL;
8726         }
8727
8728         chv_compute_dpll(crtc, crtc_state);
8729
8730         return 0;
8731 }
8732
8733 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8734                                   struct intel_crtc_state *crtc_state)
8735 {
8736         int refclk = 100000;
8737         const struct intel_limit *limit = &intel_limits_vlv;
8738
8739         memset(&crtc_state->dpll_hw_state, 0,
8740                sizeof(crtc_state->dpll_hw_state));
8741
8742         if (!crtc_state->clock_set &&
8743             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8744                                 refclk, NULL, &crtc_state->dpll)) {
8745                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8746                 return -EINVAL;
8747         }
8748
8749         vlv_compute_dpll(crtc, crtc_state);
8750
8751         return 0;
8752 }
8753
8754 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8755 {
8756         if (IS_I830(dev_priv))
8757                 return false;
8758
8759         return INTEL_GEN(dev_priv) >= 4 ||
8760                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8761 }
8762
8763 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8764                                  struct intel_crtc_state *pipe_config)
8765 {
8766         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8767         u32 tmp;
8768
8769         if (!i9xx_has_pfit(dev_priv))
8770                 return;
8771
8772         tmp = I915_READ(PFIT_CONTROL);
8773         if (!(tmp & PFIT_ENABLE))
8774                 return;
8775
8776         /* Check whether the pfit is attached to our pipe. */
8777         if (INTEL_GEN(dev_priv) < 4) {
8778                 if (crtc->pipe != PIPE_B)
8779                         return;
8780         } else {
8781                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8782                         return;
8783         }
8784
8785         pipe_config->gmch_pfit.control = tmp;
8786         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8787 }
8788
8789 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8790                                struct intel_crtc_state *pipe_config)
8791 {
8792         struct drm_device *dev = crtc->base.dev;
8793         struct drm_i915_private *dev_priv = to_i915(dev);
8794         enum pipe pipe = crtc->pipe;
8795         struct dpll clock;
8796         u32 mdiv;
8797         int refclk = 100000;
8798
8799         /* In case of DSI, DPLL will not be used */
8800         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8801                 return;
8802
8803         vlv_dpio_get(dev_priv);
8804         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8805         vlv_dpio_put(dev_priv);
8806
8807         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8808         clock.m2 = mdiv & DPIO_M2DIV_MASK;
8809         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8810         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8811         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8812
8813         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8814 }
8815
/*
 * Read back the current hardware state of the pipe's primary plane and
 * record it in @plane_config (tiling, rotation, format, surface base,
 * pitch and an estimated buffer size).  Returns silently if the plane
 * is disabled or the framebuffer bookkeeping allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to record if the plane is not enabled in hardware. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Tiling/rotation bits only exist in DSPCNTR on gen4+. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally supports horizontal mirroring. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* The surface base/offset registers differ per generation. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* PIPESRC holds (width - 1) << 16 | (height - 1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8898
/*
 * Recover the port clock from the CHV PLL divider registers, read via
 * sideband (dpio).  Leaves @pipe_config untouched when the DPLL is not
 * in use (DSI).
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Snapshot all divider registers while holding the dpio lock. */
	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* Integer part of M2 in the top bits, fractional part (if
	 * enabled) in the low 22 bits. */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8932
8933 static enum intel_output_format
8934 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
8935 {
8936         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8937         u32 tmp;
8938
8939         tmp = I915_READ(PIPEMISC(crtc->pipe));
8940
8941         if (tmp & PIPEMISC_YUV420_ENABLE) {
8942                 /* We support 4:2:0 in full blend mode only */
8943                 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
8944
8945                 return INTEL_OUTPUT_FORMAT_YCBCR420;
8946         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8947                 return INTEL_OUTPUT_FORMAT_YCBCR444;
8948         } else {
8949                 return INTEL_OUTPUT_FORMAT_RGB;
8950         }
8951 }
8952
8953 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8954 {
8955         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8956         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8957         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8958         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8959         u32 tmp;
8960
8961         tmp = I915_READ(DSPCNTR(i9xx_plane));
8962
8963         if (tmp & DISPPLANE_GAMMA_ENABLE)
8964                 crtc_state->gamma_enable = true;
8965
8966         if (!HAS_GMCH(dev_priv) &&
8967             tmp & DISPPLANE_PIPE_CSC_ENABLE)
8968                 crtc_state->csc_enable = true;
8969 }
8970
/*
 * Read the full hardware state of a GMCH-style pipe into @pipe_config.
 * Returns true when the pipe is enabled and its state was read out,
 * false when the pipe's power domain is off or the pipe is disabled.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Skip the readout entirely if the pipe's power domain is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* Defaults for fields with no hardware readout on these platforms. */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;
	pipe_config->master_transcoder = INVALID_TRANSCODER;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x/VLV/CHV expose bpc selection in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Read the pixel multiplier; its location varies per platform. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Recover port_clock via the platform-specific clock readout. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
9089
9090 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
9091 {
9092         struct intel_encoder *encoder;
9093         int i;
9094         u32 val, final;
9095         bool has_lvds = false;
9096         bool has_cpu_edp = false;
9097         bool has_panel = false;
9098         bool has_ck505 = false;
9099         bool can_ssc = false;
9100         bool using_ssc_source = false;
9101
9102         /* We need to take the global config into account */
9103         for_each_intel_encoder(&dev_priv->drm, encoder) {
9104                 switch (encoder->type) {
9105                 case INTEL_OUTPUT_LVDS:
9106                         has_panel = true;
9107                         has_lvds = true;
9108                         break;
9109                 case INTEL_OUTPUT_EDP:
9110                         has_panel = true;
9111                         if (encoder->port == PORT_A)
9112                                 has_cpu_edp = true;
9113                         break;
9114                 default:
9115                         break;
9116                 }
9117         }
9118
9119         if (HAS_PCH_IBX(dev_priv)) {
9120                 has_ck505 = dev_priv->vbt.display_clock_mode;
9121                 can_ssc = has_ck505;
9122         } else {
9123                 has_ck505 = false;
9124                 can_ssc = true;
9125         }
9126
9127         /* Check if any DPLLs are using the SSC source */
9128         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9129                 u32 temp = I915_READ(PCH_DPLL(i));
9130
9131                 if (!(temp & DPLL_VCO_ENABLE))
9132                         continue;
9133
9134                 if ((temp & PLL_REF_INPUT_MASK) ==
9135                     PLLB_REF_INPUT_SPREADSPECTRUMIN) {
9136                         using_ssc_source = true;
9137                         break;
9138                 }
9139         }
9140
9141         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
9142                       has_panel, has_lvds, has_ck505, using_ssc_source);
9143
9144         /* Ironlake: try to setup display ref clock before DPLL
9145          * enabling. This is only under driver's control after
9146          * PCH B stepping, previous chipset stepping should be
9147          * ignoring this setting.
9148          */
9149         val = I915_READ(PCH_DREF_CONTROL);
9150
9151         /* As we must carefully and slowly disable/enable each source in turn,
9152          * compute the final state we want first and check if we need to
9153          * make any changes at all.
9154          */
9155         final = val;
9156         final &= ~DREF_NONSPREAD_SOURCE_MASK;
9157         if (has_ck505)
9158                 final |= DREF_NONSPREAD_CK505_ENABLE;
9159         else
9160                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
9161
9162         final &= ~DREF_SSC_SOURCE_MASK;
9163         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9164         final &= ~DREF_SSC1_ENABLE;
9165
9166         if (has_panel) {
9167                 final |= DREF_SSC_SOURCE_ENABLE;
9168
9169                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
9170                         final |= DREF_SSC1_ENABLE;
9171
9172                 if (has_cpu_edp) {
9173                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
9174                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9175                         else
9176                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9177                 } else
9178                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9179         } else if (using_ssc_source) {
9180                 final |= DREF_SSC_SOURCE_ENABLE;
9181                 final |= DREF_SSC1_ENABLE;
9182         }
9183
9184         if (final == val)
9185                 return;
9186
9187         /* Always enable nonspread source */
9188         val &= ~DREF_NONSPREAD_SOURCE_MASK;
9189
9190         if (has_ck505)
9191                 val |= DREF_NONSPREAD_CK505_ENABLE;
9192         else
9193                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
9194
9195         if (has_panel) {
9196                 val &= ~DREF_SSC_SOURCE_MASK;
9197                 val |= DREF_SSC_SOURCE_ENABLE;
9198
9199                 /* SSC must be turned on before enabling the CPU output  */
9200                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9201                         DRM_DEBUG_KMS("Using SSC on panel\n");
9202                         val |= DREF_SSC1_ENABLE;
9203                 } else
9204                         val &= ~DREF_SSC1_ENABLE;
9205
9206                 /* Get SSC going before enabling the outputs */
9207                 I915_WRITE(PCH_DREF_CONTROL, val);
9208                 POSTING_READ(PCH_DREF_CONTROL);
9209                 udelay(200);
9210
9211                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9212
9213                 /* Enable CPU source on CPU attached eDP */
9214                 if (has_cpu_edp) {
9215                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9216                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
9217                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9218                         } else
9219                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9220                 } else
9221                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9222
9223                 I915_WRITE(PCH_DREF_CONTROL, val);
9224                 POSTING_READ(PCH_DREF_CONTROL);
9225                 udelay(200);
9226         } else {
9227                 DRM_DEBUG_KMS("Disabling CPU source output\n");
9228
9229                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9230
9231                 /* Turn off CPU output */
9232                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9233
9234                 I915_WRITE(PCH_DREF_CONTROL, val);
9235                 POSTING_READ(PCH_DREF_CONTROL);
9236                 udelay(200);
9237
9238                 if (!using_ssc_source) {
9239                         DRM_DEBUG_KMS("Disabling SSC source\n");
9240
9241                         /* Turn off the SSC source */
9242                         val &= ~DREF_SSC_SOURCE_MASK;
9243                         val |= DREF_SSC_SOURCE_DISABLE;
9244
9245                         /* Turn off SSC1 */
9246                         val &= ~DREF_SSC1_ENABLE;
9247
9248                         I915_WRITE(PCH_DREF_CONTROL, val);
9249                         POSTING_READ(PCH_DREF_CONTROL);
9250                         udelay(200);
9251                 }
9252         }
9253
9254         BUG_ON(val != final);
9255 }
9256
/*
 * Pulse the FDI mPHY IOSFSB reset: assert the reset control bit in
 * SOUTH_CHICKEN2, wait (up to 100 us) for the status bit to latch, then
 * de-assert it and wait for the status bit to clear. Timeouts are logged
 * but not treated as fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        /* Assert the mPHY reset. */
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                DRM_ERROR("FDI mPHY reset assert timeout\n");

        /* Release the reset and wait for the status bit to drop again. */
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
9277
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers via the sideband (SBI_MPHY)
 * interface. The register offsets and values come directly from the
 * WaMPhyProgramming workaround table; they are applied in 0x20xx/0x21xx
 * pairs — presumably one register per mPHY lane group (TODO: confirm
 * against the workaround documentation). Do not reorder or "clean up"
 * the magic values; they are prescribed as-is.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9352
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * The combination with_fdi && !with_spread is invalid (FDI requires
 * downspread), as is with_fdi on LP PCH (which has no FDI); both are
 * sanitized with a WARN rather than aborting.
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
{
        u32 reg, tmp;

        /* Sanitize impossible parameter combinations (see above). */
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
        if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
            with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * Ungate the SSC while keeping PATHALT set (presumably the
         * non-spread/alternate clock path — see the disable sequence),
         * then give it 24 us to settle.
         */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Switch to the spread clock by clearing PATHALT. */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* Finally enable the CLKOUT_DP output buffer. */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
9397
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
        u32 reg, tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Disable the CLKOUT_DP output buffer first. */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        /*
         * Then gate the SSC if it isn't already gated, going through
         * the PATHALT state first (with a 32 us settle delay) if the
         * spread path was active.
         */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
9423
/*
 * Map a clock bend amount in steps (-50..+50, multiples of 5) to an
 * index into sscdivintphase[]: -50 -> 0, 0 -> 10, +50 -> 20.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE values (low 16 bits) per bend amount, consumed by
 * lpt_bend_clkout_dp(). Adjacent entries share a value; the odd
 * multiples of 5 are distinguished by the dither phase programming
 * in lpt_bend_clkout_dp() instead.
 */
static const u16 sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
        [BEND_IDX( 35)] = 0x3C23,
        [BEND_IDX( 30)] = 0x3D23,
        [BEND_IDX( 25)] = 0x3D23,
        [BEND_IDX( 20)] = 0x3E23,
        [BEND_IDX( 15)] = 0x3E23,
        [BEND_IDX( 10)] = 0x3F23,
        [BEND_IDX(  5)] = 0x3F23,
        [BEND_IDX(  0)] = 0x0025,
        [BEND_IDX( -5)] = 0x0025,
        [BEND_IDX(-10)] = 0x0125,
        [BEND_IDX(-15)] = 0x0125,
        [BEND_IDX(-20)] = 0x0225,
        [BEND_IDX(-25)] = 0x0225,
        [BEND_IDX(-30)] = 0x0325,
        [BEND_IDX(-35)] = 0x0325,
        [BEND_IDX(-40)] = 0x0425,
        [BEND_IDX(-45)] = 0x0425,
        [BEND_IDX(-50)] = 0x0525,
};
9449
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
        u32 tmp;
        int idx = BEND_IDX(steps);

        /* Only multiples of 5 within the table's range are valid. */
        if (WARN_ON(steps % 5 != 0))
                return;

        if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
                return;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * Odd multiples of 5 fall between table entries; program the
         * dither phase (0xAAAAAAAB — presumably an alternating dither
         * pattern, TODO confirm) to approximate the half step.
         */
        if (steps % 10 != 0)
                tmp = 0xAAAAAAAB;
        else
                tmp = 0x00000000;
        intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

        /* Replace the low 16 bits of SSCDIVINTPHASE with the table value. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
        tmp &= 0xffff0000;
        tmp |= sscdivintphase[idx];
        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
9482
9483 #undef BEND_IDX
9484
9485 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9486 {
9487         u32 fuse_strap = I915_READ(FUSE_STRAP);
9488         u32 ctl = I915_READ(SPLL_CTL);
9489
9490         if ((ctl & SPLL_PLL_ENABLE) == 0)
9491                 return false;
9492
9493         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9494             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9495                 return true;
9496
9497         if (IS_BROADWELL(dev_priv) &&
9498             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9499                 return true;
9500
9501         return false;
9502 }
9503
9504 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9505                                enum intel_dpll_id id)
9506 {
9507         u32 fuse_strap = I915_READ(FUSE_STRAP);
9508         u32 ctl = I915_READ(WRPLL_CTL(id));
9509
9510         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9511                 return false;
9512
9513         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9514                 return true;
9515
9516         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9517             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9518             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9519                 return true;
9520
9521         return false;
9522 }
9523
9524 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9525 {
9526         struct intel_encoder *encoder;
9527         bool has_fdi = false;
9528
9529         for_each_intel_encoder(&dev_priv->drm, encoder) {
9530                 switch (encoder->type) {
9531                 case INTEL_OUTPUT_ANALOG:
9532                         has_fdi = true;
9533                         break;
9534                 default:
9535                         break;
9536                 }
9537         }
9538
9539         /*
9540          * The BIOS may have decided to use the PCH SSC
9541          * reference so we must not disable it until the
9542          * relevant PLLs have stopped relying on it. We'll
9543          * just leave the PCH SSC reference enabled in case
9544          * any active PLL is using it. It will get disabled
9545          * after runtime suspend if we don't have FDI.
9546          *
9547          * TODO: Move the whole reference clock handling
9548          * to the modeset sequence proper so that we can
9549          * actually enable/disable/reconfigure these things
9550          * safely. To do that we need to introduce a real
9551          * clock hierarchy. That would also allow us to do
9552          * clock bending finally.
9553          */
9554         dev_priv->pch_ssc_use = 0;
9555
9556         if (spll_uses_pch_ssc(dev_priv)) {
9557                 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9558                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9559         }
9560
9561         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9562                 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9563                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9564         }
9565
9566         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9567                 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9568                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9569         }
9570
9571         if (dev_priv->pch_ssc_use)
9572                 return;
9573
9574         if (has_fdi) {
9575                 lpt_bend_clkout_dp(dev_priv, 0);
9576                 lpt_enable_clkout_dp(dev_priv, true, true);
9577         } else {
9578                 lpt_disable_clkout_dp(dev_priv);
9579         }
9580 }
9581
/*
 * Initialize reference clocks when the driver loads.  Dispatches to the
 * ironlake (IBX/CPT) or LPT implementation based on the PCH type; other
 * PCH types need no refclk setup here.
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
                ironlake_init_pch_refclk(dev_priv);
                return;
        }

        if (HAS_PCH_LPT(dev_priv))
                lpt_init_pch_refclk(dev_priv);
}
9592
9593 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9594 {
9595         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9596         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9597         enum pipe pipe = crtc->pipe;
9598         u32 val;
9599
9600         val = 0;
9601
9602         switch (crtc_state->pipe_bpp) {
9603         case 18:
9604                 val |= PIPECONF_6BPC;
9605                 break;
9606         case 24:
9607                 val |= PIPECONF_8BPC;
9608                 break;
9609         case 30:
9610                 val |= PIPECONF_10BPC;
9611                 break;
9612         case 36:
9613                 val |= PIPECONF_12BPC;
9614                 break;
9615         default:
9616                 /* Case prevented by intel_choose_pipe_bpp_dither. */
9617                 BUG();
9618         }
9619
9620         if (crtc_state->dither)
9621                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9622
9623         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9624                 val |= PIPECONF_INTERLACED_ILK;
9625         else
9626                 val |= PIPECONF_PROGRESSIVE;
9627
9628         /*
9629          * This would end up with an odd purple hue over
9630          * the entire display. Make sure we don't do it.
9631          */
9632         WARN_ON(crtc_state->limited_color_range &&
9633                 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
9634
9635         if (crtc_state->limited_color_range)
9636                 val |= PIPECONF_COLOR_RANGE_SELECT;
9637
9638         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9639                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
9640
9641         val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9642
9643         val |= PIPECONF_FRAME_START_DELAY(0);
9644
9645         I915_WRITE(PIPECONF(pipe), val);
9646         POSTING_READ(PIPECONF(pipe));
9647 }
9648
9649 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9650 {
9651         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9652         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9653         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9654         u32 val = 0;
9655
9656         if (IS_HASWELL(dev_priv) && crtc_state->dither)
9657                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9658
9659         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9660                 val |= PIPECONF_INTERLACED_ILK;
9661         else
9662                 val |= PIPECONF_PROGRESSIVE;
9663
9664         if (IS_HASWELL(dev_priv) &&
9665             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9666                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9667
9668         I915_WRITE(PIPECONF(cpu_transcoder), val);
9669         POSTING_READ(PIPECONF(cpu_transcoder));
9670 }
9671
9672 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9673 {
9674         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9675         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9676         u32 val = 0;
9677
9678         switch (crtc_state->pipe_bpp) {
9679         case 18:
9680                 val |= PIPEMISC_DITHER_6_BPC;
9681                 break;
9682         case 24:
9683                 val |= PIPEMISC_DITHER_8_BPC;
9684                 break;
9685         case 30:
9686                 val |= PIPEMISC_DITHER_10_BPC;
9687                 break;
9688         case 36:
9689                 val |= PIPEMISC_DITHER_12_BPC;
9690                 break;
9691         default:
9692                 MISSING_CASE(crtc_state->pipe_bpp);
9693                 break;
9694         }
9695
9696         if (crtc_state->dither)
9697                 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9698
9699         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9700             crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9701                 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9702
9703         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9704                 val |= PIPEMISC_YUV420_ENABLE |
9705                         PIPEMISC_YUV420_MODE_FULL_BLEND;
9706
9707         if (INTEL_GEN(dev_priv) >= 11 &&
9708             (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9709                                            BIT(PLANE_CURSOR))) == 0)
9710                 val |= PIPEMISC_HDR_MODE_PRECISION;
9711
9712         I915_WRITE(PIPEMISC(crtc->pipe), val);
9713 }
9714
9715 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9716 {
9717         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9718         u32 tmp;
9719
9720         tmp = I915_READ(PIPEMISC(crtc->pipe));
9721
9722         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9723         case PIPEMISC_DITHER_6_BPC:
9724                 return 18;
9725         case PIPEMISC_DITHER_8_BPC:
9726                 return 24;
9727         case PIPEMISC_DITHER_10_BPC:
9728                 return 30;
9729         case PIPEMISC_DITHER_12_BPC:
9730                 return 36;
9731         default:
9732                 MISSING_CASE(tmp);
9733                 return 0;
9734         }
9735 }
9736
/*
 * Compute how many FDI lanes are needed to carry @target_clock (kHz) at
 * @bpp bits per pixel over a link running at @link_bw, rounding up.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
        /*
         * Pad the required bandwidth by 5%: max center spread is 2.5%,
         * doubled for safety, so spread spectrum cannot oversubscribe
         * the link.
         */
        uint32_t bps = target_clock * bpp * 21 / 20;
        uint32_t per_lane = link_bw * 8;

        /* Round-up division (same result as DIV_ROUND_UP). */
        return (bps + per_lane - 1) / per_lane;
}
9747
9748 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9749 {
9750         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9751 }
9752
/*
 * Compute the DPLL, FP0 and FP1 register values for an ironlake-style
 * (PCH) PLL and store them in crtc_state->dpll_hw_state.  If
 * @reduced_clock is non-NULL it supplies the divisors for the reduced
 * (downclocked) mode, programmed into FP1; otherwise FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state,
                                  struct dpll *reduced_clock)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 dpll, fp, fp2;
        int factor;

        /* Enable autotuning of the PLL clock (if permissible) */
        factor = 21;
        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if ((intel_panel_use_ssc(dev_priv) &&
                     dev_priv->vbt.lvds_ssc_freq == 100000) ||
                    (HAS_PCH_IBX(dev_priv) &&
                     intel_is_dual_link_lvds(dev_priv)))
                        factor = 25;
        } else if (crtc_state->sdvo_tv_clock) {
                factor = 20;
        }

        fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

        if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
                fp |= FP_CB_TUNE;

        /* FP1 uses the reduced clock divisors when one was provided. */
        if (reduced_clock) {
                fp2 = i9xx_dpll_compute_fp(reduced_clock);

                if (reduced_clock->m < factor * reduced_clock->n)
                        fp2 |= FP_CB_TUNE;
        } else {
                fp2 = fp;
        }

        dpll = 0;

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;

        dpll |= (crtc_state->pixel_multiplier - 1)
                << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                dpll |= DPLL_SDVO_HIGH_SPEED;

        if (intel_crtc_has_dp_encoder(crtc_state))
                dpll |= DPLL_SDVO_HIGH_SPEED;

        /*
         * The high speed IO clock is only really required for
         * SDVO/HDMI/DP, but we also enable it for CRT to make it
         * possible to share the DPLL between CRT and HDMI. Enabling
         * the clock needlessly does no real harm, except use up a
         * bit of power potentially.
         *
         * We'll limit this to IVB with 3 pipes, since it has only two
         * DPLLs and so DPLL sharing is the only way to get three pipes
         * driving PCH ports at the same time. On SNB we could do this,
         * and potentially avoid enabling the second DPLL, but it's not
         * clear if it's a win or loss power wise. No point in doing
         * this on ILK at all since it has a fixed DPLL<->pipe mapping.
         */
        if (INTEL_NUM_PIPES(dev_priv) == 3 &&
            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
                dpll |= DPLL_SDVO_HIGH_SPEED;

        /* compute bitmask from p1 value */
        dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        /* also FPA1 */
        dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

        switch (crtc_state->dpll.p2) {
        case 5:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                break;
        case 7:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
                break;
        case 10:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
                break;
        case 14:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
                break;
        }

        /* LVDS with SSC takes its reference from the spread spectrum input. */
        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        dpll |= DPLL_VCO_ENABLE;

        crtc_state->dpll_hw_state.dpll = dpll;
        crtc_state->dpll_hw_state.fp0 = fp;
        crtc_state->dpll_hw_state.fp1 = fp2;
}
9854
/*
 * Compute the PLL configuration for an ironlake-style pipe and reserve
 * a shared DPLL for it.  Returns 0 on success, -EINVAL if no suitable
 * divisors or no free shared DPLL could be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
                                       struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state =
                to_intel_atomic_state(crtc_state->uapi.state);
        const struct intel_limit *limit;
        int refclk = 120000; /* kHz; overridden by the SSC freq for LVDS+SSC */

        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));

        /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
        if (!crtc_state->has_pch_encoder)
                return 0;

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
                                      dev_priv->vbt.lvds_ssc_freq);
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                }

                /* Divisor limits depend on link config and refclk. */
                if (intel_is_dual_link_lvds(dev_priv)) {
                        if (refclk == 100000)
                                limit = &intel_limits_ironlake_dual_lvds_100m;
                        else
                                limit = &intel_limits_ironlake_dual_lvds;
                } else {
                        if (refclk == 100000)
                                limit = &intel_limits_ironlake_single_lvds_100m;
                        else
                                limit = &intel_limits_ironlake_single_lvds;
                }
        } else {
                limit = &intel_limits_ironlake_dac;
        }

        /* Honor user-provided divisors; otherwise search within the limits. */
        if (!crtc_state->clock_set &&
            !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
                                refclk, NULL, &crtc_state->dpll)) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        }

        ironlake_compute_dpll(crtc, crtc_state, NULL);

        if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
                DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
                              pipe_name(crtc->pipe));
                return -EINVAL;
        }

        return 0;
}
9910
/*
 * Read back the link M1/N1, data M1/N1 and TU size from the PCH
 * transcoder registers for @crtc's pipe into @m_n.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;

        m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
        m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
        /* DATA_M1 carries both the data M value and the TU size field. */
        m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
                & ~TU_SIZE_MASK;
        m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
        m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
                    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9926
/*
 * Read back link/data M/N and TU size from the CPU transcoder (gen5+)
 * or the legacy per-pipe G4X registers (gen < 5) into @m_n.  The
 * second set (@m2_n2) is only filled when requested and the transcoder
 * actually has M2/N2 registers.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
                                         enum transcoder transcoder,
                                         struct intel_link_m_n *m_n,
                                         struct intel_link_m_n *m2_n2)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (INTEL_GEN(dev_priv) >= 5) {
                m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
                m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
                /* DATA_M1 carries both the data M value and the TU size field. */
                m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
                        & ~TU_SIZE_MASK;
                m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
                m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

                if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
                        m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
                        m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
                        m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
                                        & ~TU_SIZE_MASK;
                        m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
                        m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
                                        & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
                }
        } else {
                /* Pre-gen5: M/N live in per-pipe (G4X-style) registers. */
                m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
                m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
                m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
                        & ~TU_SIZE_MASK;
                m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
                m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
        }
}
9963
9964 void intel_dp_get_m_n(struct intel_crtc *crtc,
9965                       struct intel_crtc_state *pipe_config)
9966 {
9967         if (pipe_config->has_pch_encoder)
9968                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9969         else
9970                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9971                                              &pipe_config->dp_m_n,
9972                                              &pipe_config->dp_m2_n2);
9973 }
9974
/* Read back the FDI link M/N configuration from the CPU transcoder
 * (no second M2/N2 set exists for FDI). */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
                                        struct intel_crtc_state *pipe_config)
{
        intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
                                     &pipe_config->fdi_m_n, NULL);
}
9981
/*
 * Read out the panel fitter (pipe scaler) state on SKL+: scan this
 * pipe's scalers for one that is enabled and bound to the pipe itself
 * (no plane selected), record its window position/size and mark it in
 * use; update the scaler-users bitmask either way.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
        u32 ps_ctrl = 0;
        int id = -1; /* stays -1 if no pipe scaler is found */
        int i;

        /* find scaler attached to this pipe */
        for (i = 0; i < crtc->num_scalers; i++) {
                ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
                /* Enabled with no plane selected => it scales the pipe. */
                if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
                        id = i;
                        pipe_config->pch_pfit.enabled = true;
                        pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
                        pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
                        scaler_state->scalers[i].in_use = true;
                        break;
                }
        }

        scaler_state->scaler_id = id;
        if (id >= 0) {
                scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
        } else {
                scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
        }
}
10012
/*
 * Read back the primary plane's programmed state on SKL+ to build an
 * initial plane config (used for framebuffer takeover from the BIOS/GOP).
 *
 * Decodes pixel format, alpha mode, tiling/modifier, rotation/reflection,
 * surface base, plane size and stride from the plane registers and fills
 * @plane_config with a freshly allocated struct intel_framebuffer.
 * On an unrecognized tiling value the partially filled fb is freed and the
 * function returns without setting plane_config->fb.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
                                 struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum plane_id plane_id = plane->id;
        enum pipe pipe;
        u32 val, base, offset, stride_mult, tiling, alpha;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to read back if the plane is disabled. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = I915_READ(PLANE_CTL(pipe, plane_id));

        /* The format field layout in PLANE_CTL changed on gen11 (ICL). */
        if (INTEL_GEN(dev_priv) >= 11)
                pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
        else
                pixel_format = val & PLANE_CTL_FORMAT_MASK;

        /* Alpha mode moved to PLANE_COLOR_CTL on GLK and gen10+. */
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
                alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
                alpha &= PLANE_COLOR_ALPHA_MASK;
        } else {
                alpha = val & PLANE_CTL_ALPHA_MASK;
        }

        fourcc = skl_format_to_fourcc(pixel_format,
                                      val & PLANE_CTL_ORDER_RGBX, alpha);
        fb->format = drm_format_info(fourcc);

        /* Map hardware tiling bits to a DRM format modifier. */
        tiling = val & PLANE_CTL_TILED_MASK;
        switch (tiling) {
        case PLANE_CTL_TILED_LINEAR:
                fb->modifier = DRM_FORMAT_MOD_LINEAR;
                break;
        case PLANE_CTL_TILED_X:
                plane_config->tiling = I915_TILING_X;
                fb->modifier = I915_FORMAT_MOD_X_TILED;
                break;
        case PLANE_CTL_TILED_Y:
                plane_config->tiling = I915_TILING_Y;
                if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
                        fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
                else
                        fb->modifier = I915_FORMAT_MOD_Y_TILED;
                break;
        case PLANE_CTL_TILED_YF:
                if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
                        fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
                else
                        fb->modifier = I915_FORMAT_MOD_Yf_TILED;
                break;
        default:
                MISSING_CASE(tiling);
                goto error;
        }

        /*
         * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
         * while i915 HW rotation is clockwise, thats why this swapping.
         */
        switch (val & PLANE_CTL_ROTATE_MASK) {
        case PLANE_CTL_ROTATE_0:
                plane_config->rotation = DRM_MODE_ROTATE_0;
                break;
        case PLANE_CTL_ROTATE_90:
                plane_config->rotation = DRM_MODE_ROTATE_270;
                break;
        case PLANE_CTL_ROTATE_180:
                plane_config->rotation = DRM_MODE_ROTATE_180;
                break;
        case PLANE_CTL_ROTATE_270:
                plane_config->rotation = DRM_MODE_ROTATE_90;
                break;
        }

        if (INTEL_GEN(dev_priv) >= 10 &&
            val & PLANE_CTL_FLIP_HORIZONTAL)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        /* Surface address is 4K aligned; low bits are masked off. */
        base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
        plane_config->base = base;

        /* NOTE(review): offset is read but not used below — confirm intent. */
        offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

        /* PLANE_SIZE encodes (height-1) << 16 | (width-1). */
        val = I915_READ(PLANE_SIZE(pipe, plane_id));
        fb->height = ((val >> 16) & 0xffff) + 1;
        fb->width = ((val >> 0) & 0xffff) + 1;

        /* Stride register is in units that depend on format/modifier. */
        val = I915_READ(PLANE_STRIDE(pipe, plane_id));
        stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
        fb->pitches[0] = (val & 0x3ff) * stride_mult;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      crtc->base.name, plane->base.name, fb->width, fb->height,
                      fb->format->cpp[0] * 8, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
        return;

error:
        kfree(intel_fb);
}
10139
10140 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
10141                                      struct intel_crtc_state *pipe_config)
10142 {
10143         struct drm_device *dev = crtc->base.dev;
10144         struct drm_i915_private *dev_priv = to_i915(dev);
10145         u32 tmp;
10146
10147         tmp = I915_READ(PF_CTL(crtc->pipe));
10148
10149         if (tmp & PF_ENABLE) {
10150                 pipe_config->pch_pfit.enabled = true;
10151                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
10152                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
10153
10154                 /* We currently do not free assignements of panel fitters on
10155                  * ivb/hsw (since we don't use the higher upscaling modes which
10156                  * differentiates them) so just WARN about this case for now. */
10157                 if (IS_GEN(dev_priv, 7)) {
10158                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
10159                                 PF_PIPE_SEL_IVB(crtc->pipe));
10160                 }
10161         }
10162 }
10163
10164 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
10165                                      struct intel_crtc_state *pipe_config)
10166 {
10167         struct drm_device *dev = crtc->base.dev;
10168         struct drm_i915_private *dev_priv = to_i915(dev);
10169         enum intel_display_power_domain power_domain;
10170         intel_wakeref_t wakeref;
10171         u32 tmp;
10172         bool ret;
10173
10174         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10175         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10176         if (!wakeref)
10177                 return false;
10178
10179         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10180         pipe_config->shared_dpll = NULL;
10181         pipe_config->master_transcoder = INVALID_TRANSCODER;
10182
10183         ret = false;
10184         tmp = I915_READ(PIPECONF(crtc->pipe));
10185         if (!(tmp & PIPECONF_ENABLE))
10186                 goto out;
10187
10188         switch (tmp & PIPECONF_BPC_MASK) {
10189         case PIPECONF_6BPC:
10190                 pipe_config->pipe_bpp = 18;
10191                 break;
10192         case PIPECONF_8BPC:
10193                 pipe_config->pipe_bpp = 24;
10194                 break;
10195         case PIPECONF_10BPC:
10196                 pipe_config->pipe_bpp = 30;
10197                 break;
10198         case PIPECONF_12BPC:
10199                 pipe_config->pipe_bpp = 36;
10200                 break;
10201         default:
10202                 break;
10203         }
10204
10205         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10206                 pipe_config->limited_color_range = true;
10207
10208         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10209         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10210         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10211                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10212                 break;
10213         default:
10214                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10215                 break;
10216         }
10217
10218         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10219                 PIPECONF_GAMMA_MODE_SHIFT;
10220
10221         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10222
10223         i9xx_get_pipe_color_config(pipe_config);
10224         intel_color_get_config(pipe_config);
10225
10226         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10227                 struct intel_shared_dpll *pll;
10228                 enum intel_dpll_id pll_id;
10229
10230                 pipe_config->has_pch_encoder = true;
10231
10232                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10233                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10234                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10235
10236                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10237
10238                 if (HAS_PCH_IBX(dev_priv)) {
10239                         /*
10240                          * The pipe->pch transcoder and pch transcoder->pll
10241                          * mapping is fixed.
10242                          */
10243                         pll_id = (enum intel_dpll_id) crtc->pipe;
10244                 } else {
10245                         tmp = I915_READ(PCH_DPLL_SEL);
10246                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10247                                 pll_id = DPLL_ID_PCH_PLL_B;
10248                         else
10249                                 pll_id= DPLL_ID_PCH_PLL_A;
10250                 }
10251
10252                 pipe_config->shared_dpll =
10253                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10254                 pll = pipe_config->shared_dpll;
10255
10256                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10257                                                 &pipe_config->dpll_hw_state));
10258
10259                 tmp = pipe_config->dpll_hw_state.dpll;
10260                 pipe_config->pixel_multiplier =
10261                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10262                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10263
10264                 ironlake_pch_clock_get(crtc, pipe_config);
10265         } else {
10266                 pipe_config->pixel_multiplier = 1;
10267         }
10268
10269         intel_get_pipe_timings(crtc, pipe_config);
10270         intel_get_pipe_src_size(crtc, pipe_config);
10271
10272         ironlake_get_pfit_config(crtc, pipe_config);
10273
10274         ret = true;
10275
10276 out:
10277         intel_display_power_put(dev_priv, power_domain, wakeref);
10278
10279         return ret;
10280 }
10281 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10282                                       struct intel_crtc_state *crtc_state)
10283 {
10284         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10285         struct intel_atomic_state *state =
10286                 to_intel_atomic_state(crtc_state->uapi.state);
10287
10288         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10289             INTEL_GEN(dev_priv) >= 11) {
10290                 struct intel_encoder *encoder =
10291                         intel_get_crtc_new_encoder(state, crtc_state);
10292
10293                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10294                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10295                                       pipe_name(crtc->pipe));
10296                         return -EINVAL;
10297                 }
10298         }
10299
10300         return 0;
10301 }
10302
10303 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
10304                                    enum port port,
10305                                    struct intel_crtc_state *pipe_config)
10306 {
10307         enum intel_dpll_id id;
10308         u32 temp;
10309
10310         temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10311         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10312
10313         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10314                 return;
10315
10316         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10317 }
10318
/*
 * Read back which DPLL feeds @port on ICL+ and record it in the
 * per-port DPLL slots of @pipe_config.
 *
 * Combo PHY ports read the PLL index from ICL_DPCLKA_CFGCR0.
 * Type-C ports either use the MG PHY PLL (derived from the TC port) or
 * the TBT PLL, depending on DDI_CLK_SEL. The chosen slot is then made
 * the active port DPLL.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum phy phy = intel_port_to_phy(dev_priv, port);
        enum icl_port_dpll_id port_dpll_id;
        enum intel_dpll_id id;
        u32 temp;

        if (intel_phy_is_combo(dev_priv, phy)) {
                temp = I915_READ(ICL_DPCLKA_CFGCR0) &
                        ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
                id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
                port_dpll_id = ICL_PORT_DPLL_DEFAULT;
        } else if (intel_phy_is_tc(dev_priv, phy)) {
                u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

                if (clk_sel == DDI_CLK_SEL_MG) {
                        id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
                                                                    port));
                        port_dpll_id = ICL_PORT_DPLL_MG_PHY;
                } else {
                        /* Anything at/above TBT_162 is a TBT PLL frequency. */
                        WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
                        id = DPLL_ID_ICL_TBTPLL;
                        port_dpll_id = ICL_PORT_DPLL_DEFAULT;
                }
        } else {
                WARN(1, "Invalid port %x\n", port);
                return;
        }

        pipe_config->icl_port_dplls[port_dpll_id].pll =
                intel_get_shared_dpll_by_id(dev_priv, id);

        icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10355
10356 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10357                                 enum port port,
10358                                 struct intel_crtc_state *pipe_config)
10359 {
10360         enum intel_dpll_id id;
10361
10362         switch (port) {
10363         case PORT_A:
10364                 id = DPLL_ID_SKL_DPLL0;
10365                 break;
10366         case PORT_B:
10367                 id = DPLL_ID_SKL_DPLL1;
10368                 break;
10369         case PORT_C:
10370                 id = DPLL_ID_SKL_DPLL2;
10371                 break;
10372         default:
10373                 DRM_ERROR("Incorrect port type\n");
10374                 return;
10375         }
10376
10377         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10378 }
10379
/*
 * Read back which shared DPLL feeds @port on SKL/KBL.
 *
 * The PLL index comes from the per-port clock select field of DPLL_CTRL2.
 * NOTE(review): the shift is spelled as the literal (port * 3 + 1) —
 * presumably the field layout of DPLL_CTRL2_DDI_CLK_SEL_MASK; a named
 * shift macro would be clearer. Confirm against the register definition.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 temp;

        temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
        id = temp >> (port * 3 + 1);

        if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
                return;

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10395
/*
 * Read back which shared DPLL feeds @port on HSW/BDW from PORT_CLK_SEL.
 *
 * An unrecognized selector triggers MISSING_CASE and then falls through
 * to the PORT_CLK_SEL_NONE case (port has no clock), returning without
 * touching shared_dpll.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

        switch (ddi_pll_sel) {
        case PORT_CLK_SEL_WRPLL1:
                id = DPLL_ID_WRPLL1;
                break;
        case PORT_CLK_SEL_WRPLL2:
                id = DPLL_ID_WRPLL2;
                break;
        case PORT_CLK_SEL_SPLL:
                id = DPLL_ID_SPLL;
                break;
        case PORT_CLK_SEL_LCPLL_810:
                id = DPLL_ID_LCPLL_810;
                break;
        case PORT_CLK_SEL_LCPLL_1350:
                id = DPLL_ID_LCPLL_1350;
                break;
        case PORT_CLK_SEL_LCPLL_2700:
                id = DPLL_ID_LCPLL_2700;
                break;
        default:
                MISSING_CASE(ddi_pll_sel);
                /* fall through */
        case PORT_CLK_SEL_NONE:
                return;
        }

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10431
/*
 * Determine which CPU transcoder drives @crtc on HSW+ and whether it is
 * enabled.
 *
 * The default pipe->transcoder mapping is 1:1, but eDP (and on gen11+
 * the DSI transcoders) can be routed to any pipe, so each candidate
 * panel transcoder's TRANS_DDI_FUNC_CTL is examined to see if it is
 * enabled and wired to this pipe.
 *
 * On success the transcoder's power domain wakeref is stored in
 * @wakerefs and its bit set in @power_domain_mask — the CALLER releases
 * them. Returns true if the chosen transcoder's pipe is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     u64 *power_domain_mask,
                                     intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        unsigned long panel_transcoder_mask = 0;
        unsigned long enabled_panel_transcoders = 0;
        enum transcoder panel_transcoder;
        intel_wakeref_t wf;
        u32 tmp;

        /* Gen11+ adds DSI transcoders that can also be freely routed. */
        if (INTEL_GEN(dev_priv) >= 11)
                panel_transcoder_mask |=
                        BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

        if (HAS_TRANSCODER_EDP(dev_priv))
                panel_transcoder_mask |= BIT(TRANSCODER_EDP);

        /*
         * The pipe->transcoder mapping is fixed with the exception of the eDP
         * and DSI transcoders handled below.
         */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

        /*
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
        for_each_set_bit(panel_transcoder,
                         &panel_transcoder_mask,
                         ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
                bool force_thru = false;
                enum pipe trans_pipe;

                tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
                if (!(tmp & TRANS_DDI_FUNC_ENABLE))
                        continue;

                /*
                 * Log all enabled ones, only use the first one.
                 *
                 * FIXME: This won't work for two separate DSI displays.
                 */
                enabled_panel_transcoders |= BIT(panel_transcoder);
                if (enabled_panel_transcoders != BIT(panel_transcoder))
                        continue;

                /* Decode which pipe this panel transcoder is feeding. */
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
                        WARN(1, "unknown pipe linked to transcoder %s\n",
                             transcoder_name(panel_transcoder));
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                        force_thru = true;
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_pipe = PIPE_A;
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
                        trans_pipe = PIPE_B;
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
                        trans_pipe = PIPE_C;
                        break;
                }

                if (trans_pipe == crtc->pipe) {
                        pipe_config->cpu_transcoder = panel_transcoder;
                        pipe_config->pch_pfit.force_thru = force_thru;
                }
        }

        /*
         * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
         */
        WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
                enabled_panel_transcoders != BIT(TRANSCODER_EDP));

        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        /* Caller owns this wakeref; record it for later release. */
        wakerefs[power_domain] = wf;
        *power_domain_mask |= BIT_ULL(power_domain);

        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

        return tmp & PIPECONF_ENABLE;
}
10527
/*
 * Check whether @crtc is driven by a DSI transcoder on BXT/GLK.
 *
 * Probes DSI ports A and C; for each one whose transcoder power domain
 * is up, verifies the DSI PLL is running (register access hangs without
 * it), the port is enabled (DPI_ENABLE) and routed to this pipe. Any
 * wakerefs taken are recorded in @wakerefs/@power_domain_mask for the
 * caller to release.
 *
 * Returns true if a DSI transcoder was found for this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         u64 *power_domain_mask,
                                         intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum transcoder cpu_transcoder;
        intel_wakeref_t wf;
        enum port port;
        u32 tmp;

        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                /* Fixed port->transcoder mapping: A -> DSI_A, C -> DSI_C. */
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

                wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
                if (!wf)
                        continue;

                /* Caller owns this wakeref; record it for later release. */
                wakerefs[power_domain] = wf;
                *power_domain_mask |= BIT_ULL(power_domain);

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                /* Skip ports routed to a different pipe. */
                tmp = I915_READ(MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10582
/*
 * Read back the DDI port state for @crtc: which port the transcoder
 * drives, which shared DPLL feeds that port (per platform), and — on
 * HSW/BDW only — whether the PCH/FDI transcoder is in use (port E).
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
                                       struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_shared_dpll *pll;
        enum port port;
        u32 tmp;

        if (transcoder_is_dsi(cpu_transcoder)) {
                /* DSI transcoders have a fixed port mapping. */
                port = (cpu_transcoder == TRANSCODER_DSI_A) ?
                                                PORT_A : PORT_B;
        } else {
                /* Port select field layout changed on gen12. */
                tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                if (INTEL_GEN(dev_priv) >= 12)
                        port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
                else
                        port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
        }

        /* Dispatch to the platform-specific DPLL readout. Order matters:
         * gen11+ must be checked before the older-platform helpers. */
        if (INTEL_GEN(dev_priv) >= 11)
                icelake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_BC(dev_priv))
                skylake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_LP(dev_priv))
                bxt_get_ddi_pll(dev_priv, port, pipe_config);
        else
                haswell_get_ddi_pll(dev_priv, port, pipe_config);

        pll = pipe_config->shared_dpll;
        if (pll) {
                WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
                                                &pipe_config->dpll_hw_state));
        }

        /*
         * Haswell has only FDI/PCH transcoder A. It is which is connected to
         * DDI E. So just check whether this pipe is wired to DDI E and whether
         * the PCH transcoder is on.
         */
        if (INTEL_GEN(dev_priv) < 9 &&
            (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;

                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;

                ironlake_get_fdi_m_n_config(crtc, pipe_config);
        }
}
10636
10637 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
10638                                                  enum transcoder cpu_transcoder)
10639 {
10640         u32 trans_port_sync, master_select;
10641
10642         trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
10643
10644         if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
10645                 return INVALID_TRANSCODER;
10646
10647         master_select = trans_port_sync &
10648                         PORT_SYNC_MODE_MASTER_SELECT_MASK;
10649         if (master_select == 0)
10650                 return TRANSCODER_EDP;
10651         else
10652                 return master_select - 1;
10653 }
10654
/*
 * Read back the transcoder port-sync topology for @crtc_state on ICL+.
 *
 * Records this transcoder's master (if it is a sync slave), then scans
 * transcoders A-D for slaves whose master is this transcoder, building
 * sync_mode_slaves_mask. A transcoder cannot be both a master and a
 * slave, which the final WARN enforces.
 */
static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        u32 transcoders;
        enum transcoder cpu_transcoder;

        crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
                                                                  crtc_state->cpu_transcoder);

        transcoders = BIT(TRANSCODER_A) |
                BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) |
                BIT(TRANSCODER_D);
        for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t trans_wakeref;

                /* Skip transcoders whose power domain is off. */
                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                                   power_domain);

                if (!trans_wakeref)
                        continue;

                /* This transcoder is a slave of ours -> record it. */
                if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
                    crtc_state->cpu_transcoder)
                        crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

                intel_display_power_put(dev_priv, power_domain, trans_wakeref);
        }

        /* Being both a master and a slave at once is invalid. */
        WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
                crtc_state->sync_mode_slaves_mask);
}
10689
/*
 * Read out the current hardware state of @crtc into @pipe_config
 * (HSW and later).
 *
 * Returns true if the pipe is active.  The wakerefs[] array, indexed by
 * power domain, records every display power reference the readout helpers
 * acquire; all of them are dropped again in the "out" loop before
 * returning, together with the bits accumulated in power_domain_mask.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	/* Bail out early if the pipe's power well is not enabled. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	/* On gen9 LP a DSI transcoder may be driving the pipe instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	/* Output colorspace readout differs between HSW and BDW+. */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);

		/*
		 * Currently there is no interface defined to
		 * check user preference between RGB/YCBCR444
		 * or YCBCR420. So the only possible case for
		 * YCBCR444 usage is driving YCBCR420 output
		 * with LSPCON, when pipe is configured for
		 * YCBCR444 output and LSPCON takes care of
		 * downsampling it.
		 */
		pipe_config->lspcon_downsampling =
			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
	}

	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	/*
	 * The panel fitter lives in a separate power domain; only read
	 * out the pfit state if that domain is currently enabled.
	 */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	WARN_ON(power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	if (INTEL_GEN(dev_priv) >= 11 &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder))
		icelake_get_trans_port_sync_config(pipe_config);

out:
	/* Drop every power reference taken during the readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}
10822
10823 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10824 {
10825         struct drm_i915_private *dev_priv =
10826                 to_i915(plane_state->uapi.plane->dev);
10827         const struct drm_framebuffer *fb = plane_state->hw.fb;
10828         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10829         u32 base;
10830
10831         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10832                 base = obj->phys_handle->busaddr;
10833         else
10834                 base = intel_plane_ggtt_offset(plane_state);
10835
10836         return base + plane_state->color_plane[0].offset;
10837 }
10838
10839 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10840 {
10841         int x = plane_state->uapi.dst.x1;
10842         int y = plane_state->uapi.dst.y1;
10843         u32 pos = 0;
10844
10845         if (x < 0) {
10846                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10847                 x = -x;
10848         }
10849         pos |= x << CURSOR_X_SHIFT;
10850
10851         if (y < 0) {
10852                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10853                 y = -y;
10854         }
10855         pos |= y << CURSOR_Y_SHIFT;
10856
10857         return pos;
10858 }
10859
10860 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10861 {
10862         const struct drm_mode_config *config =
10863                 &plane_state->uapi.plane->dev->mode_config;
10864         int width = drm_rect_width(&plane_state->uapi.dst);
10865         int height = drm_rect_height(&plane_state->uapi.dst);
10866
10867         return width > 0 && width <= config->cursor_width &&
10868                 height > 0 && height <= config->cursor_height;
10869 }
10870
/*
 * Compute and validate the surface offset for the cursor plane.
 *
 * Runs intel_plane_compute_gtt() for the state, derives the aligned
 * surface offset for color plane 0, and rejects any configuration that
 * would leave a residual x/y offset, since that would require panning
 * within the cursor fb.  Returns 0 on success or a negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing further to compute for an invisible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	/* Any leftover x/y would mean panning inside the fb - unsupported. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* 180 degree rotation: start scanout from the last pixel. */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
10921
10922 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10923                               struct intel_plane_state *plane_state)
10924 {
10925         const struct drm_framebuffer *fb = plane_state->hw.fb;
10926         int ret;
10927
10928         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10929                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10930                 return -EINVAL;
10931         }
10932
10933         ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
10934                                                   &crtc_state->uapi,
10935                                                   DRM_PLANE_HELPER_NO_SCALING,
10936                                                   DRM_PLANE_HELPER_NO_SCALING,
10937                                                   true, true);
10938         if (ret)
10939                 return ret;
10940
10941         /* Use the unclipped src/dst rectangles, which we program to hw */
10942         plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
10943         plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
10944
10945         ret = intel_cursor_check_surface(plane_state);
10946         if (ret)
10947                 return ret;
10948
10949         if (!plane_state->uapi.visible)
10950                 return 0;
10951
10952         ret = intel_plane_check_src_coordinates(plane_state);
10953         if (ret)
10954                 return ret;
10955
10956         return 0;
10957 }
10958
/* Maximum cursor stride on 845g/865g is a fixed 2048 bytes. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
10966
10967 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10968 {
10969         u32 cntl = 0;
10970
10971         if (crtc_state->gamma_enable)
10972                 cntl |= CURSOR_GAMMA_ENABLE;
10973
10974         return cntl;
10975 }
10976
10977 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10978                            const struct intel_plane_state *plane_state)
10979 {
10980         return CURSOR_ENABLE |
10981                 CURSOR_FORMAT_ARGB |
10982                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10983 }
10984
10985 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10986 {
10987         int width = drm_rect_width(&plane_state->uapi.dst);
10988
10989         /*
10990          * 845g/865g are only limited by the width of their cursors,
10991          * the height is arbitrary up to the precision of the register.
10992          */
10993         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10994 }
10995
10996 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10997                              struct intel_plane_state *plane_state)
10998 {
10999         const struct drm_framebuffer *fb = plane_state->hw.fb;
11000         int ret;
11001
11002         ret = intel_check_cursor(crtc_state, plane_state);
11003         if (ret)
11004                 return ret;
11005
11006         /* if we want to turn off the cursor ignore width and height */
11007         if (!fb)
11008                 return 0;
11009
11010         /* Check for which cursor types we support */
11011         if (!i845_cursor_size_ok(plane_state)) {
11012                 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
11013                           drm_rect_width(&plane_state->uapi.dst),
11014                           drm_rect_height(&plane_state->uapi.dst));
11015                 return -EINVAL;
11016         }
11017
11018         WARN_ON(plane_state->uapi.visible &&
11019                 plane_state->color_plane[0].stride != fb->pitches[0]);
11020
11021         switch (fb->pitches[0]) {
11022         case 256:
11023         case 512:
11024         case 1024:
11025         case 2048:
11026                 break;
11027         default:
11028                 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
11029                               fb->pitches[0]);
11030                 return -EINVAL;
11031         }
11032
11033         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
11034
11035         return 0;
11036 }
11037
/*
 * Program the 845g/865g cursor registers from @plane_state; a NULL or
 * invisible @plane_state writes all-zero values, disabling the cursor.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, reprogram, then re-enable with new cntl. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; no disable cycle needed. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11082
/* Disable the 845g/865g cursor by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
11088
11089 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11090                                      enum pipe *pipe)
11091 {
11092         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11093         enum intel_display_power_domain power_domain;
11094         intel_wakeref_t wakeref;
11095         bool ret;
11096
11097         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11098         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11099         if (!wakeref)
11100                 return false;
11101
11102         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11103
11104         *pipe = PIPE_A;
11105
11106         intel_display_power_put(dev_priv, power_domain, wakeref);
11107
11108         return ret;
11109 }
11110
/*
 * Maximum i9xx cursor stride: the widest supported cursor times
 * 4 bytes per pixel.
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
11118
11119 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11120 {
11121         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11122         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11123         u32 cntl = 0;
11124
11125         if (INTEL_GEN(dev_priv) >= 11)
11126                 return cntl;
11127
11128         if (crtc_state->gamma_enable)
11129                 cntl = MCURSOR_GAMMA_ENABLE;
11130
11131         if (crtc_state->csc_enable)
11132                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11133
11134         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11135                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11136
11137         return cntl;
11138 }
11139
11140 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11141                            const struct intel_plane_state *plane_state)
11142 {
11143         struct drm_i915_private *dev_priv =
11144                 to_i915(plane_state->uapi.plane->dev);
11145         u32 cntl = 0;
11146
11147         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11148                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11149
11150         switch (drm_rect_width(&plane_state->uapi.dst)) {
11151         case 64:
11152                 cntl |= MCURSOR_MODE_64_ARGB_AX;
11153                 break;
11154         case 128:
11155                 cntl |= MCURSOR_MODE_128_ARGB_AX;
11156                 break;
11157         case 256:
11158                 cntl |= MCURSOR_MODE_256_ARGB_AX;
11159                 break;
11160         default:
11161                 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11162                 return 0;
11163         }
11164
11165         if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11166                 cntl |= MCURSOR_ROTATE_180;
11167
11168         return cntl;
11169 }
11170
11171 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11172 {
11173         struct drm_i915_private *dev_priv =
11174                 to_i915(plane_state->uapi.plane->dev);
11175         int width = drm_rect_width(&plane_state->uapi.dst);
11176         int height = drm_rect_height(&plane_state->uapi.dst);
11177
11178         if (!intel_cursor_size_ok(plane_state))
11179                 return false;
11180
11181         /* Cursor width is limited to a few power-of-two sizes */
11182         switch (width) {
11183         case 256:
11184         case 128:
11185         case 64:
11186                 break;
11187         default:
11188                 return false;
11189         }
11190
11191         /*
11192          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11193          * height from 8 lines up to the cursor width, when the
11194          * cursor is not rotated. Everything else requires square
11195          * cursors.
11196          */
11197         if (HAS_CUR_FBC(dev_priv) &&
11198             plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11199                 if (height < 8 || height > width)
11200                         return false;
11201         } else {
11202                 if (height != width)
11203                         return false;
11204         }
11205
11206         return true;
11207 }
11208
/*
 * Atomic check for the i9xx+ cursor: run the common cursor checks, then
 * validate the platform-specific size/stride constraints and the CHV
 * pipe C workaround, and precompute the cursor control register value.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  drm_rect_width(&plane_state->uapi.dst),
			  drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The fb stride must exactly match the cursor width. */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0],
			      drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
11265
/*
 * Program the i9xx+ cursor registers from @plane_state; a NULL or
 * invisible @plane_state writes all-zero values, disabling the cursor.
 * The register write order below is deliberate - see the comment in
 * the body.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11333
/* Disable the i9xx+ cursor by programming an all-zero state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
11339
11340 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11341                                      enum pipe *pipe)
11342 {
11343         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11344         enum intel_display_power_domain power_domain;
11345         intel_wakeref_t wakeref;
11346         bool ret;
11347         u32 val;
11348
11349         /*
11350          * Not 100% correct for planes that can move between pipes,
11351          * but that's only the case for gen2-3 which don't have any
11352          * display power wells.
11353          */
11354         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11355         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11356         if (!wakeref)
11357                 return false;
11358
11359         val = I915_READ(CURCNTR(plane->pipe));
11360
11361         ret = val & MCURSOR_MODE;
11362
11363         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11364                 *pipe = plane->pipe;
11365         else
11366                 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11367                         MCURSOR_PIPE_SELECT_SHIFT;
11368
11369         intel_display_power_put(dev_priv, power_domain, wakeref);
11370
11371         return ret;
11372 }
11373
/*
 * VESA 640x480x72Hz mode to set on the pipe for load detection -
 * presumably chosen as a conservative mode any analog sink can handle.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11379
11380 struct drm_framebuffer *
11381 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11382                          struct drm_mode_fb_cmd2 *mode_cmd)
11383 {
11384         struct intel_framebuffer *intel_fb;
11385         int ret;
11386
11387         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11388         if (!intel_fb)
11389                 return ERR_PTR(-ENOMEM);
11390
11391         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11392         if (ret)
11393                 goto err;
11394
11395         return &intel_fb->base;
11396
11397 err:
11398         kfree(intel_fb);
11399         return ERR_PTR(ret);
11400 }
11401
/*
 * Add all planes on @crtc to @state and set them up to be disabled
 * (no crtc, no fb) when the state is committed.  Returns 0 on success
 * or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		/* Only touch planes currently assigned to this crtc. */
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
11426
/*
 * intel_get_load_detect_pipe - enable a pipe/encoder for load detection
 * @connector: connector to drive for the detection
 * @old: cookie filled in with the state needed by
 *       intel_release_load_detect_pipe() to undo this setup
 * @ctx: modeset acquire context held by the caller
 *
 * Picks a CRTC for @connector (preferring one already assigned,
 * otherwise the first unused one the encoder can drive), commits the
 * fixed load_detect_mode with all planes disabled, and saves a restore
 * state in @old.
 *
 * NOTE(review): the function is declared int but returns true (1) when
 * the pipe was set up, false (0) on failure, or -EDEADLK when the
 * caller must back off and retry the locking.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder can't drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use; drop the lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to commit the detection mode, one to restore later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Snapshot the current state so it can be restored afterwards. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* Propagate deadlock so the caller can drop locks and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
11581
11582 void intel_release_load_detect_pipe(struct drm_connector *connector,
11583                                     struct intel_load_detect_pipe *old,
11584                                     struct drm_modeset_acquire_ctx *ctx)
11585 {
11586         struct intel_encoder *intel_encoder =
11587                 intel_attached_encoder(connector);
11588         struct drm_encoder *encoder = &intel_encoder->base;
11589         struct drm_atomic_state *state = old->restore_state;
11590         int ret;
11591
11592         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11593                       connector->base.id, connector->name,
11594                       encoder->base.id, encoder->name);
11595
11596         if (!state)
11597                 return;
11598
11599         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11600         if (ret)
11601                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11602         drm_atomic_state_put(state);
11603 }
11604
11605 static int i9xx_pll_refclk(struct drm_device *dev,
11606                            const struct intel_crtc_state *pipe_config)
11607 {
11608         struct drm_i915_private *dev_priv = to_i915(dev);
11609         u32 dpll = pipe_config->dpll_hw_state.dpll;
11610
11611         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11612                 return dev_priv->vbt.lvds_ssc_freq;
11613         else if (HAS_PCH_SPLIT(dev_priv))
11614                 return 120000;
11615         else if (!IS_GEN(dev_priv, 2))
11616                 return 96000;
11617         else
11618                 return 48000;
11619 }
11620
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divisor register (FP0 or FP1) the DPLL uses. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* On Pineview N is stored as a bitmask; ffs() recovers it. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is encoded as a bitfield here too, hence the ffs(). */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL mode (DAC serial vs. LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: i830 is treated as having no LVDS register to read. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
11710
11711 int intel_dotclock_calculate(int link_freq,
11712                              const struct intel_link_m_n *m_n)
11713 {
11714         /*
11715          * The calculation for the data clock is:
11716          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11717          * But we want to avoid losing precison if possible, so:
11718          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11719          *
11720          * and the link clock is simpler:
11721          * link_clock = (m * link_clock) / n
11722          */
11723
11724         if (!m_n->link_n)
11725                 return 0;
11726
11727         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11728 }
11729
/*
 * Read out the effective clocks for a PCH pipe: port_clock from the
 * DPLL state, and a dotclock derived from the FDI M/N configuration.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
11747
11748 /* Returns the currently programmed mode of the given encoder. */
11749 struct drm_display_mode *
11750 intel_encoder_current_mode(struct intel_encoder *encoder)
11751 {
11752         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11753         struct intel_crtc_state *crtc_state;
11754         struct drm_display_mode *mode;
11755         struct intel_crtc *crtc;
11756         enum pipe pipe;
11757
11758         if (!encoder->get_hw_state(encoder, &pipe))
11759                 return NULL;
11760
11761         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11762
11763         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11764         if (!mode)
11765                 return NULL;
11766
11767         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11768         if (!crtc_state) {
11769                 kfree(mode);
11770                 return NULL;
11771         }
11772
11773         crtc_state->uapi.crtc = &crtc->base;
11774
11775         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11776                 kfree(crtc_state);
11777                 kfree(mode);
11778                 return NULL;
11779         }
11780
11781         encoder->get_config(encoder, crtc_state);
11782
11783         intel_mode_from_pipe_config(mode, crtc_state);
11784
11785         kfree(crtc_state);
11786
11787         return mode;
11788 }
11789
/* drm_crtc .destroy hook: tear down the CRTC and free its container. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	/* to_intel_crtc() is pure pointer arithmetic, safe after cleanup. */
	kfree(to_intel_crtc(crtc));
}
11797
11798 /**
11799  * intel_wm_need_update - Check whether watermarks need updating
11800  * @cur: current plane state
11801  * @new: new plane state
11802  *
11803  * Check current plane state versus the new one to determine whether
11804  * watermarks need to be recalculated.
11805  *
11806  * Returns true or false.
11807  */
11808 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11809                                  struct intel_plane_state *new)
11810 {
11811         /* Update watermarks on tiling or size changes. */
11812         if (new->uapi.visible != cur->uapi.visible)
11813                 return true;
11814
11815         if (!cur->hw.fb || !new->hw.fb)
11816                 return false;
11817
11818         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11819             cur->hw.rotation != new->hw.rotation ||
11820             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11821             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11822             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11823             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
11824                 return true;
11825
11826         return false;
11827 }
11828
11829 static bool needs_scaling(const struct intel_plane_state *state)
11830 {
11831         int src_w = drm_rect_width(&state->uapi.src) >> 16;
11832         int src_h = drm_rect_height(&state->uapi.src) >> 16;
11833         int dst_w = drm_rect_width(&state->uapi.dst);
11834         int dst_h = drm_rect_height(&state->uapi.dst);
11835
11836         return (src_w != dst_w || src_h != dst_h);
11837 }
11838
/*
 * intel_plane_atomic_calc_changes - derive crtc state flags for a plane update
 * @old_crtc_state: crtc state before this commit
 * @crtc_state: new crtc state, updated in place (wm/cxsr/fb_bits flags)
 * @old_plane_state: plane state before this commit
 * @plane_state: new plane state
 *
 * Works out the plane's visibility transition (turn on/off/stay) and sets
 * the watermark, CxSR and frontbuffer-tracking bookkeeping bits in
 * @crtc_state accordingly.
 *
 * Returns 0 on success, or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* Gen9+ non-cursor planes go through the skl plane scaler setup. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane cannot have been visible on an inactive crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->uapi.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
		crtc_state->min_cdclk[plane->id] = 0;
	}

	/* Invisible before and after: nothing to do for this plane. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 crtc->base.base.id, crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	/* Record the plane for frontbuffer tracking. */
	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
11961
11962 static bool encoders_cloneable(const struct intel_encoder *a,
11963                                const struct intel_encoder *b)
11964 {
11965         /* masks could be asymmetric, so check both ways */
11966         return a == b || (a->cloneable & (1 << b->type) &&
11967                           b->cloneable & (1 << a->type));
11968 }
11969
11970 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11971                                          struct intel_crtc *crtc,
11972                                          struct intel_encoder *encoder)
11973 {
11974         struct intel_encoder *source_encoder;
11975         struct drm_connector *connector;
11976         struct drm_connector_state *connector_state;
11977         int i;
11978
11979         for_each_new_connector_in_state(state, connector, connector_state, i) {
11980                 if (connector_state->crtc != &crtc->base)
11981                         continue;
11982
11983                 source_encoder =
11984                         to_intel_encoder(connector_state->best_encoder);
11985                 if (!encoders_cloneable(encoder, source_encoder))
11986                         return false;
11987         }
11988
11989         return true;
11990 }
11991
11992 static int icl_add_linked_planes(struct intel_atomic_state *state)
11993 {
11994         struct intel_plane *plane, *linked;
11995         struct intel_plane_state *plane_state, *linked_plane_state;
11996         int i;
11997
11998         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11999                 linked = plane_state->planar_linked_plane;
12000
12001                 if (!linked)
12002                         continue;
12003
12004                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
12005                 if (IS_ERR(linked_plane_state))
12006                         return PTR_ERR(linked_plane_state);
12007
12008                 WARN_ON(linked_plane_state->planar_linked_plane != plane);
12009                 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
12010         }
12011
12012         return 0;
12013 }
12014
/*
 * icl_check_nv12_planes - pair each NV12 (planar) plane with a free Y plane
 * @crtc_state: new crtc state being checked
 *
 * On gen11+ the UV and Y surfaces of a planar format are scanned out by
 * two separate hardware planes. Rebuild the master/slave plane links for
 * this crtc: tear down all stale links first, then assign a currently
 * unused Y-capable plane to each plane in crtc_state->nv12_planes.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or an
 * error from adding a plane to the atomic state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No planar planes on this crtc: nothing left to link. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find the first Y-capable plane that is not already in use. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->color_plane[0] = plane_state->color_plane[0];

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes additionally need the chroma upsampler routed. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12104
12105 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12106 {
12107         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12108         struct intel_atomic_state *state =
12109                 to_intel_atomic_state(new_crtc_state->uapi.state);
12110         const struct intel_crtc_state *old_crtc_state =
12111                 intel_atomic_get_old_crtc_state(state, crtc);
12112
12113         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12114 }
12115
/*
 * icl_add_sync_mode_crtcs - set up master/slave transcoder links for tiles
 * @crtc_state: new crtc state being checked
 *
 * For tiled (multi-crtc) displays on gen11+, if @crtc_state drives a
 * non-master tile, find the crtc driving the master tile (last horizontal
 * and vertical tile of the same tile group), record its transcoder as our
 * master, and add ourselves to the master's sync_mode_slaves_mask.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): master_crtc is only assigned, never reset, inside the
 * connector loop — if more than one slave connector on this crtc were
 * iterated, a stale master from an earlier iteration could be reused.
 * Confirm whether multiple matches per crtc are actually possible here.
 */
static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_connector *master_connector, *connector;
	struct drm_connector_state *connector_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc *master_crtc = NULL;
	struct drm_crtc_state *master_crtc_state;
	struct intel_crtc_state *master_pipe_config;
	int i, tile_group_id;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * In case of tiled displays there could be one or more slaves but there is
	 * only one master. Lets make the CRTC used by the connector corresponding
	 * to the last horizonal and last vertical tile a master/genlock CRTC.
	 * All the other CRTCs corresponding to other tiles of the same Tile group
	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;
		if (!connector->has_tile)
			continue;
		/* Mode must span exactly one tile for sync mode to apply. */
		if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
		    crtc_state->hw.mode.vdisplay != connector->tile_v_size)
			return 0;
		/* This connector is itself the master tile: nothing to link. */
		if (connector->tile_h_loc == connector->num_h_tile - 1 &&
		    connector->tile_v_loc == connector->num_v_tile - 1)
			continue;
		crtc_state->sync_mode_slaves_mask = 0;
		tile_group_id = connector->tile_group->id;
		/* Scan all connectors for the master tile of this group. */
		drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
		drm_for_each_connector_iter(master_connector, &conn_iter) {
			struct drm_connector_state *master_conn_state = NULL;

			if (!master_connector->has_tile)
				continue;
			if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
			    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
				continue;
			if (master_connector->tile_group->id != tile_group_id)
				continue;

			master_conn_state = drm_atomic_get_connector_state(&state->base,
									   master_connector);
			if (IS_ERR(master_conn_state)) {
				drm_connector_list_iter_end(&conn_iter);
				return PTR_ERR(master_conn_state);
			}
			if (master_conn_state->crtc) {
				master_crtc = master_conn_state->crtc;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!master_crtc) {
			DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
				      connector_state->crtc->base.id);
			return -EINVAL;
		}

		master_crtc_state = drm_atomic_get_crtc_state(&state->base,
							      master_crtc);
		if (IS_ERR(master_crtc_state))
			return PTR_ERR(master_crtc_state);

		/* Link slave -> master transcoder and master -> slave mask. */
		master_pipe_config = to_intel_crtc_state(master_crtc_state);
		crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
		master_pipe_config->sync_mode_slaves_mask |=
			BIT(crtc_state->cpu_transcoder);
		DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
			      transcoder_name(crtc_state->master_transcoder),
			      crtc_state->uapi.crtc->base.id,
			      master_pipe_config->sync_mode_slaves_mask);
	}

	return 0;
}
12200
/*
 * intel_crtc_atomic_check - per-crtc atomic check
 * @state: the full atomic state
 * @crtc: crtc to validate
 *
 * Computes clocks, color management, watermarks, scalers and IPS state
 * for the crtc's new state. The ordering matters: clocks and color are
 * settled before watermarks, and pipe watermarks before intermediate ones.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* Pre-gen5 (except g4x): disabling the pipe needs a post-commit wm update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* A shared_dpll should not already be assigned before clock compute. */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate wm requires pipe wm to have been computed. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* Gen9+: refresh crtc scaler state and (re)assign hw scalers. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe)
			ret = skl_update_scaler_crtc(crtc_state);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, crtc,
							 crtc_state);
	}

	if (HAS_IPS(dev_priv))
		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);

	return ret;
}
12274
/*
 * Sync every connector's atomic state with its legacy encoder/crtc
 * pointers (e.g. after hardware state readout), adjusting the connector
 * reference counts to match the new CRTC binding.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previously bound CRTC. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound to a CRTC again: take a fresh reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
12299
12300 static int
12301 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12302                       struct intel_crtc_state *pipe_config)
12303 {
12304         struct drm_connector *connector = conn_state->connector;
12305         const struct drm_display_info *info = &connector->display_info;
12306         int bpp;
12307
12308         switch (conn_state->max_bpc) {
12309         case 6 ... 7:
12310                 bpp = 6 * 3;
12311                 break;
12312         case 8 ... 9:
12313                 bpp = 8 * 3;
12314                 break;
12315         case 10 ... 11:
12316                 bpp = 10 * 3;
12317                 break;
12318         case 12:
12319                 bpp = 12 * 3;
12320                 break;
12321         default:
12322                 return -EINVAL;
12323         }
12324
12325         if (bpp < pipe_config->pipe_bpp) {
12326                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12327                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12328                               connector->base.id, connector->name,
12329                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
12330                               pipe_config->pipe_bpp);
12331
12332                 pipe_config->pipe_bpp = bpp;
12333         }
12334
12335         return 0;
12336 }
12337
12338 static int
12339 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12340                           struct intel_crtc_state *pipe_config)
12341 {
12342         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12343         struct drm_atomic_state *state = pipe_config->uapi.state;
12344         struct drm_connector *connector;
12345         struct drm_connector_state *connector_state;
12346         int bpp, i;
12347
12348         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12349             IS_CHERRYVIEW(dev_priv)))
12350                 bpp = 10*3;
12351         else if (INTEL_GEN(dev_priv) >= 5)
12352                 bpp = 12*3;
12353         else
12354                 bpp = 8*3;
12355
12356         pipe_config->pipe_bpp = bpp;
12357
12358         /* Clamp display bpp to connector max bpp */
12359         for_each_new_connector_in_state(state, connector, connector_state, i) {
12360                 int ret;
12361
12362                 if (connector_state->crtc != &crtc->base)
12363                         continue;
12364
12365                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12366                 if (ret)
12367                         return ret;
12368         }
12369
12370         return 0;
12371 }
12372
/* Log the derived crtc_* timing fields of @mode for debugging. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
12384
/* Log one link M/N configuration, tagged with @id (e.g. "fdi", "dp m_n"). */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
12395
/* Log an HDMI infoframe at KMS debug level; no-op if KMS debug is off. */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	/* Skip the (relatively expensive) log call when it would be dropped. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
12405
/* Expands to a designated initializer mapping INTEL_OUTPUT_x to "x". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for the INTEL_OUTPUT_* values, indexed by value. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12424
12425 static void snprintf_output_types(char *buf, size_t len,
12426                                   unsigned int output_types)
12427 {
12428         char *str = buf;
12429         int i;
12430
12431         str[0] = '\0';
12432
12433         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12434                 int r;
12435
12436                 if ((output_types & BIT(i)) == 0)
12437                         continue;
12438
12439                 r = snprintf(str, len, "%s%s",
12440                              str != buf ? "," : "", output_type_str[i]);
12441                 if (r >= len)
12442                         break;
12443                 str += r;
12444                 len -= r;
12445
12446                 output_types &= ~BIT(i);
12447         }
12448
12449         WARN_ON_ONCE(output_types != 0);
12450 }
12451
/* Human-readable names for enum intel_output_format, indexed by value. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12458
12459 static const char *output_formats(enum intel_output_format format)
12460 {
12461         if (format >= ARRAY_SIZE(output_format_str))
12462                 format = INTEL_OUTPUT_FORMAT_INVALID;
12463         return output_format_str[format];
12464 }
12465
/* Log a plane's framebuffer, format, rotation, scaler and src/dst rects. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* A plane without a framebuffer has nothing more to report. */
	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->uapi.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->uapi.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->hw.rotation, plane_state->scaler_id);
	/* src is in 16.16 fixed point, dst in integer pixels. */
	if (plane_state->uapi.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
			      DRM_RECT_ARG(&plane_state->uapi.dst));
}
12491
/*
 * Dump the full contents of a CRTC state for debugging, followed by
 * the state of every plane on this pipe found in @state (if given).
 * @context is a short caller-supplied tag identifying why the dump is
 * being made.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->hw.enable), context);

	/* A disabled pipe has no interesting config; just dump its planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->hw.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the alternate link config used for DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump each infoframe type only when it is actually enabled. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* Panel fitter registers differ between GMCH and PCH platforms. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has a CGM unit where other platforms have a CSC mode. */
	if (IS_CHERRYVIEW(dev_priv))
		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->cgm_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->csc_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
12602
/*
 * Verify that no digital port is claimed by more than one connector in
 * the new state, and that MST and SST/HDMI are not mixed on the same
 * port.  Returns true if the configuration is conflict-free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;	/* bitmask of ports used by SST/HDMI */
	unsigned int used_mst_ports = 0;	/* bitmask of ports used by MST */
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/*
		 * Prefer the connector state from @state; fall back to
		 * the current (committed) state for untouched connectors.
		 */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			/* DDI encoder type only makes sense on DDI hardware. */
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
12674
/*
 * Copy the uapi state pieces that may change without a full modeset
 * (currently just the color management blobs) into the hw state.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
12680
/*
 * Copy the full uapi (userspace-visible) CRTC state into the hw state
 * used for programming the hardware; done on a modeset.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	/* Also pick up the pieces copied on non-modeset updates. */
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
12690
/*
 * Propagate the hw CRTC state back into the uapi state so userspace
 * sees what was actually programmed (e.g. after state readout).
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Only fails on allocation failure of the mode blob. */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
12707
/*
 * Reset @crtc_state to a mostly-zeroed state ahead of recomputing it,
 * while carefully preserving the fields that must survive (uapi state,
 * DPLL assignments, scalers, etc.).  Returns 0 or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc_state *saved_state;

	/* Zeroed scratch state; fields to keep are copied into it below. */
	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* On GMCH platforms the watermark state must also survive. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call.
	 */
	if (is_trans_port_sync_master(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	/* Overwrite the live state with the scratch copy, clearing the rest. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
12752
/*
 * Compute a full pipe configuration for a modeset: sanitize sync flags,
 * pick the pipe bpp, let every encoder on this CRTC adjust the config,
 * then fix up the CRTC itself.  Encoders may request one retry (RETRY)
 * when bandwidth constraints force a different config.  Returns 0 on
 * success or a negative error code (-EDEADLK is passed through for
 * atomic backoff).
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;

	/* Default to the transcoder matching this pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the pre-encoder bpp for the final debug message. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Set the crtc_state defaults for trans_port_sync */
	pipe_config->master_transcoder = INVALID_TRANSCODER;
	ret = icl_add_sync_mode_crtcs(pipe_config);
	if (ret) {
		DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
			      ret);
		return ret;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK just means "back off and retry the locks". */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* RETRY (> 0) means run the encoder loop once more; allow only one. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
12900
/*
 * Compare two clock values allowing roughly 5% relative tolerance.
 * Identical clocks always match; a zero clock only matches another
 * zero clock (handled by the equality check).
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* Equivalent to diff/avg < 5%, computed in integer arithmetic. */
	return (diff + sum) * 100 / sum < 105;
}
12918
12919 static bool
12920 intel_compare_m_n(unsigned int m, unsigned int n,
12921                   unsigned int m2, unsigned int n2,
12922                   bool exact)
12923 {
12924         if (m == m2 && n == n2)
12925                 return true;
12926
12927         if (exact || !m || !n || !m2 || !n2)
12928                 return false;
12929
12930         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12931
12932         if (n > n2) {
12933                 while (n > n2) {
12934                         m2 <<= 1;
12935                         n2 <<= 1;
12936                 }
12937         } else if (n < n2) {
12938                 while (n < n2) {
12939                         m <<= 1;
12940                         n <<= 1;
12941                 }
12942         }
12943
12944         if (n != n2)
12945                 return false;
12946
12947         return intel_fuzzy_clock_check(m, m2);
12948 }
12949
12950 static bool
12951 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12952                        const struct intel_link_m_n *m2_n2,
12953                        bool exact)
12954 {
12955         return m_n->tu == m2_n2->tu &&
12956                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12957                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12958                 intel_compare_m_n(m_n->link_m, m_n->link_n,
12959                                   m2_n2->link_m, m2_n2->link_n, exact);
12960 }
12961
12962 static bool
12963 intel_compare_infoframe(const union hdmi_infoframe *a,
12964                         const union hdmi_infoframe *b)
12965 {
12966         return memcmp(a, b, sizeof(*a)) == 0;
12967 }
12968
/*
 * Report an infoframe mismatch between expected (@a) and found (@b)
 * state.  During a fastset this is only a debug message; otherwise it
 * is a driver error.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		/* Skip the expensive infoframe dumps if they'd be dropped. */
		if ((drm_debug & DRM_UT_KMS) == 0)
			return;

		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
		DRM_DEBUG_KMS("expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		DRM_DEBUG_KMS("found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		DRM_ERROR("mismatch in %s infoframe\n", name);
		DRM_ERROR("expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		DRM_ERROR("found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
12992
12993 static void __printf(4, 5)
12994 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
12995                      const char *name, const char *format, ...)
12996 {
12997         struct va_format vaf;
12998         va_list args;
12999
13000         va_start(args, format);
13001         vaf.fmt = format;
13002         vaf.va = &args;
13003
13004         if (fastset)
13005                 DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
13006                               crtc->base.base.id, crtc->base.name, name, &vaf);
13007         else
13008                 DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
13009                           crtc->base.base.id, crtc->base.name, name, &vaf);
13010
13011         va_end(args);
13012 }
13013
13014 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13015 {
13016         if (i915_modparams.fastboot != -1)
13017                 return i915_modparams.fastboot;
13018
13019         /* Enable fastboot by default on Skylake and newer */
13020         if (INTEL_GEN(dev_priv) >= 9)
13021                 return true;
13022
13023         /* Enable fastboot by default on VLV and CHV */
13024         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13025                 return true;
13026
13027         /* Disabled by default on all others */
13028         return false;
13029 }
13030
13031 static bool
13032 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
13033                           const struct intel_crtc_state *pipe_config,
13034                           bool fastset)
13035 {
13036         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
13037         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
13038         bool ret = true;
13039         u32 bp_gamma = 0;
13040         bool fixup_inherited = fastset &&
13041                 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
13042                 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
13043
13044         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
13045                 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
13046                 ret = false;
13047         }
13048
13049 #define PIPE_CONF_CHECK_X(name) do { \
13050         if (current_config->name != pipe_config->name) { \
13051                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13052                                      "(expected 0x%08x, found 0x%08x)", \
13053                                      current_config->name, \
13054                                      pipe_config->name); \
13055                 ret = false; \
13056         } \
13057 } while (0)
13058
13059 #define PIPE_CONF_CHECK_I(name) do { \
13060         if (current_config->name != pipe_config->name) { \
13061                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13062                                      "(expected %i, found %i)", \
13063                                      current_config->name, \
13064                                      pipe_config->name); \
13065                 ret = false; \
13066         } \
13067 } while (0)
13068
13069 #define PIPE_CONF_CHECK_BOOL(name) do { \
13070         if (current_config->name != pipe_config->name) { \
13071                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
13072                                      "(expected %s, found %s)", \
13073                                      yesno(current_config->name), \
13074                                      yesno(pipe_config->name)); \
13075                 ret = false; \
13076         } \
13077 } while (0)
13078
13079 /*
13080  * Checks state where we only read out the enabling, but not the entire
13081  * state itself (like full infoframes or ELD for audio). These states
13082  * require a full modeset on bootup to fix up.
13083  */
13084 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13085         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13086                 PIPE_CONF_CHECK_BOOL(name); \
13087         } else { \
13088                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13089                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13090                                      yesno(current_config->name), \
13091                                      yesno(pipe_config->name)); \
13092                 ret = false; \
13093         } \
13094 } while (0)
13095
13096 #define PIPE_CONF_CHECK_P(name) do { \
13097         if (current_config->name != pipe_config->name) { \
13098                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13099                                      "(expected %p, found %p)", \
13100                                      current_config->name, \
13101                                      pipe_config->name); \
13102                 ret = false; \
13103         } \
13104 } while (0)
13105
13106 #define PIPE_CONF_CHECK_M_N(name) do { \
13107         if (!intel_compare_link_m_n(&current_config->name, \
13108                                     &pipe_config->name,\
13109                                     !fastset)) { \
13110                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13111                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13112                                      "found tu %i, gmch %i/%i link %i/%i)", \
13113                                      current_config->name.tu, \
13114                                      current_config->name.gmch_m, \
13115                                      current_config->name.gmch_n, \
13116                                      current_config->name.link_m, \
13117                                      current_config->name.link_n, \
13118                                      pipe_config->name.tu, \
13119                                      pipe_config->name.gmch_m, \
13120                                      pipe_config->name.gmch_n, \
13121                                      pipe_config->name.link_m, \
13122                                      pipe_config->name.link_n); \
13123                 ret = false; \
13124         } \
13125 } while (0)
13126
13127 /* This is required for BDW+ where there is only one set of registers for
13128  * switching between high and low RR.
13129  * This macro can be used whenever a comparison has to be made between one
13130  * hw state and multiple sw state variables.
13131  */
13132 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13133         if (!intel_compare_link_m_n(&current_config->name, \
13134                                     &pipe_config->name, !fastset) && \
13135             !intel_compare_link_m_n(&current_config->alt_name, \
13136                                     &pipe_config->name, !fastset)) { \
13137                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13138                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13139                                      "or tu %i gmch %i/%i link %i/%i, " \
13140                                      "found tu %i, gmch %i/%i link %i/%i)", \
13141                                      current_config->name.tu, \
13142                                      current_config->name.gmch_m, \
13143                                      current_config->name.gmch_n, \
13144                                      current_config->name.link_m, \
13145                                      current_config->name.link_n, \
13146                                      current_config->alt_name.tu, \
13147                                      current_config->alt_name.gmch_m, \
13148                                      current_config->alt_name.gmch_n, \
13149                                      current_config->alt_name.link_m, \
13150                                      current_config->alt_name.link_n, \
13151                                      pipe_config->name.tu, \
13152                                      pipe_config->name.gmch_m, \
13153                                      pipe_config->name.gmch_n, \
13154                                      pipe_config->name.link_m, \
13155                                      pipe_config->name.link_n); \
13156                 ret = false; \
13157         } \
13158 } while (0)
13159
13160 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13161         if ((current_config->name ^ pipe_config->name) & (mask)) { \
13162                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13163                                      "(%x) (expected %i, found %i)", \
13164                                      (mask), \
13165                                      current_config->name & (mask), \
13166                                      pipe_config->name & (mask)); \
13167                 ret = false; \
13168         } \
13169 } while (0)
13170
13171 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13172         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13173                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13174                                      "(expected %i, found %i)", \
13175                                      current_config->name, \
13176                                      pipe_config->name); \
13177                 ret = false; \
13178         } \
13179 } while (0)
13180
13181 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13182         if (!intel_compare_infoframe(&current_config->infoframes.name, \
13183                                      &pipe_config->infoframes.name)) { \
13184                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13185                                                &current_config->infoframes.name, \
13186                                                &pipe_config->infoframes.name); \
13187                 ret = false; \
13188         } \
13189 } while (0)
13190
13191 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13192         if (current_config->name1 != pipe_config->name1) { \
13193                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13194                                 "(expected %i, found %i, won't compare lut values)", \
13195                                 current_config->name1, \
13196                                 pipe_config->name1); \
13197                 ret = false;\
13198         } else { \
13199                 if (!intel_color_lut_equal(current_config->name2, \
13200                                         pipe_config->name2, pipe_config->name1, \
13201                                         bit_precision)) { \
13202                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13203                                         "hw_state doesn't match sw_state"); \
13204                         ret = false; \
13205                 } \
13206         } \
13207 } while (0)
13208
13209 #define PIPE_CONF_QUIRK(quirk) \
13210         ((current_config->quirks | pipe_config->quirks) & (quirk))
13211
13212         PIPE_CONF_CHECK_I(cpu_transcoder);
13213
13214         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13215         PIPE_CONF_CHECK_I(fdi_lanes);
13216         PIPE_CONF_CHECK_M_N(fdi_m_n);
13217
13218         PIPE_CONF_CHECK_I(lane_count);
13219         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13220
13221         if (INTEL_GEN(dev_priv) < 8) {
13222                 PIPE_CONF_CHECK_M_N(dp_m_n);
13223
13224                 if (current_config->has_drrs)
13225                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
13226         } else
13227                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13228
13229         PIPE_CONF_CHECK_X(output_types);
13230
13231         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13232         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13233         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13234         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13235         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13236         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13237
13238         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13239         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13240         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13241         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13242         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13243         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13244
13245         PIPE_CONF_CHECK_I(pixel_multiplier);
13246         PIPE_CONF_CHECK_I(output_format);
13247         PIPE_CONF_CHECK_I(dc3co_exitline);
13248         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13249         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13250             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13251                 PIPE_CONF_CHECK_BOOL(limited_color_range);
13252
13253         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13254         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13255         PIPE_CONF_CHECK_BOOL(has_infoframe);
13256         PIPE_CONF_CHECK_BOOL(fec_enable);
13257
13258         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13259
13260         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13261                               DRM_MODE_FLAG_INTERLACE);
13262
13263         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13264                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13265                                       DRM_MODE_FLAG_PHSYNC);
13266                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13267                                       DRM_MODE_FLAG_NHSYNC);
13268                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13269                                       DRM_MODE_FLAG_PVSYNC);
13270                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13271                                       DRM_MODE_FLAG_NVSYNC);
13272         }
13273
13274         PIPE_CONF_CHECK_X(gmch_pfit.control);
13275         /* pfit ratios are autocomputed by the hw on gen4+ */
13276         if (INTEL_GEN(dev_priv) < 4)
13277                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13278         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13279
13280         /*
13281          * Changing the EDP transcoder input mux
13282          * (A_ONOFF vs. A_ON) requires a full modeset.
13283          */
13284         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13285
13286         if (!fastset) {
13287                 PIPE_CONF_CHECK_I(pipe_src_w);
13288                 PIPE_CONF_CHECK_I(pipe_src_h);
13289
13290                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13291                 if (current_config->pch_pfit.enabled) {
13292                         PIPE_CONF_CHECK_X(pch_pfit.pos);
13293                         PIPE_CONF_CHECK_X(pch_pfit.size);
13294                 }
13295
13296                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13297                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13298
13299                 PIPE_CONF_CHECK_X(gamma_mode);
13300                 if (IS_CHERRYVIEW(dev_priv))
13301                         PIPE_CONF_CHECK_X(cgm_mode);
13302                 else
13303                         PIPE_CONF_CHECK_X(csc_mode);
13304                 PIPE_CONF_CHECK_BOOL(gamma_enable);
13305                 PIPE_CONF_CHECK_BOOL(csc_enable);
13306
13307                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13308                 if (bp_gamma)
13309                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13310
13311         }
13312
13313         PIPE_CONF_CHECK_BOOL(double_wide);
13314
13315         PIPE_CONF_CHECK_P(shared_dpll);
13316         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13317         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13318         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13319         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13320         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13321         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13322         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13323         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13324         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13325         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13326         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13327         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13328         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13329         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13330         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13331         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13332         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13333         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13334         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13335         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13336         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13337         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13338         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13339         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13340         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13341         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13342         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13343         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13344         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13345         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13346         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13347
13348         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13349         PIPE_CONF_CHECK_X(dsi_pll.div);
13350
13351         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13352                 PIPE_CONF_CHECK_I(pipe_bpp);
13353
13354         PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
13355         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13356
13357         PIPE_CONF_CHECK_I(min_voltage_level);
13358
13359         PIPE_CONF_CHECK_X(infoframes.enable);
13360         PIPE_CONF_CHECK_X(infoframes.gcp);
13361         PIPE_CONF_CHECK_INFOFRAME(avi);
13362         PIPE_CONF_CHECK_INFOFRAME(spd);
13363         PIPE_CONF_CHECK_INFOFRAME(hdmi);
13364         PIPE_CONF_CHECK_INFOFRAME(drm);
13365
13366         PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
13367         PIPE_CONF_CHECK_I(master_transcoder);
13368
13369 #undef PIPE_CONF_CHECK_X
13370 #undef PIPE_CONF_CHECK_I
13371 #undef PIPE_CONF_CHECK_BOOL
13372 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13373 #undef PIPE_CONF_CHECK_P
13374 #undef PIPE_CONF_CHECK_FLAGS
13375 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13376 #undef PIPE_CONF_CHECK_COLOR_LUT
13377 #undef PIPE_CONF_QUIRK
13378
13379         return ret;
13380 }
13381
13382 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13383                                            const struct intel_crtc_state *pipe_config)
13384 {
13385         if (pipe_config->has_pch_encoder) {
13386                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13387                                                             &pipe_config->fdi_m_n);
13388                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13389
13390                 /*
13391                  * FDI already provided one idea for the dotclock.
13392                  * Yell if the encoder disagrees.
13393                  */
13394                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13395                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13396                      fdi_dotclock, dotclock);
13397         }
13398 }
13399
13400 static void verify_wm_state(struct intel_crtc *crtc,
13401                             struct intel_crtc_state *new_crtc_state)
13402 {
13403         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13404         struct skl_hw_state {
13405                 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
13406                 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
13407                 struct skl_ddb_allocation ddb;
13408                 struct skl_pipe_wm wm;
13409         } *hw;
13410         struct skl_ddb_allocation *sw_ddb;
13411         struct skl_pipe_wm *sw_wm;
13412         struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
13413         const enum pipe pipe = crtc->pipe;
13414         int plane, level, max_level = ilk_wm_max_level(dev_priv);
13415
13416         if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
13417                 return;
13418
13419         hw = kzalloc(sizeof(*hw), GFP_KERNEL);
13420         if (!hw)
13421                 return;
13422
13423         skl_pipe_wm_get_hw_state(crtc, &hw->wm);
13424         sw_wm = &new_crtc_state->wm.skl.optimal;
13425
13426         skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
13427
13428         skl_ddb_get_hw_state(dev_priv, &hw->ddb);
13429         sw_ddb = &dev_priv->wm.skl_hw.ddb;
13430
13431         if (INTEL_GEN(dev_priv) >= 11 &&
13432             hw->ddb.enabled_slices != sw_ddb->enabled_slices)
13433                 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
13434                           sw_ddb->enabled_slices,
13435                           hw->ddb.enabled_slices);
13436
13437         /* planes */
13438         for_each_universal_plane(dev_priv, pipe, plane) {
13439                 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13440
13441                 hw_plane_wm = &hw->wm.planes[plane];
13442                 sw_plane_wm = &sw_wm->planes[plane];
13443
13444                 /* Watermarks */
13445                 for (level = 0; level <= max_level; level++) {
13446                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13447                                                 &sw_plane_wm->wm[level]))
13448                                 continue;
13449
13450                         DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13451                                   pipe_name(pipe), plane + 1, level,
13452                                   sw_plane_wm->wm[level].plane_en,
13453                                   sw_plane_wm->wm[level].plane_res_b,
13454                                   sw_plane_wm->wm[level].plane_res_l,
13455                                   hw_plane_wm->wm[level].plane_en,
13456                                   hw_plane_wm->wm[level].plane_res_b,
13457                                   hw_plane_wm->wm[level].plane_res_l);
13458                 }
13459
13460                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13461                                          &sw_plane_wm->trans_wm)) {
13462                         DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13463                                   pipe_name(pipe), plane + 1,
13464                                   sw_plane_wm->trans_wm.plane_en,
13465                                   sw_plane_wm->trans_wm.plane_res_b,
13466                                   sw_plane_wm->trans_wm.plane_res_l,
13467                                   hw_plane_wm->trans_wm.plane_en,
13468                                   hw_plane_wm->trans_wm.plane_res_b,
13469                                   hw_plane_wm->trans_wm.plane_res_l);
13470                 }
13471
13472                 /* DDB */
13473                 hw_ddb_entry = &hw->ddb_y[plane];
13474                 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
13475
13476                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13477                         DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
13478                                   pipe_name(pipe), plane + 1,
13479                                   sw_ddb_entry->start, sw_ddb_entry->end,
13480                                   hw_ddb_entry->start, hw_ddb_entry->end);
13481                 }
13482         }
13483
13484         /*
13485          * cursor
13486          * If the cursor plane isn't active, we may not have updated it's ddb
13487          * allocation. In that case since the ddb allocation will be updated
13488          * once the plane becomes visible, we can skip this check
13489          */
13490         if (1) {
13491                 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13492
13493                 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
13494                 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
13495
13496                 /* Watermarks */
13497                 for (level = 0; level <= max_level; level++) {
13498                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13499                                                 &sw_plane_wm->wm[level]))
13500                                 continue;
13501
13502                         DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13503                                   pipe_name(pipe), level,
13504                                   sw_plane_wm->wm[level].plane_en,
13505                                   sw_plane_wm->wm[level].plane_res_b,
13506                                   sw_plane_wm->wm[level].plane_res_l,
13507                                   hw_plane_wm->wm[level].plane_en,
13508                                   hw_plane_wm->wm[level].plane_res_b,
13509                                   hw_plane_wm->wm[level].plane_res_l);
13510                 }
13511
13512                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13513                                          &sw_plane_wm->trans_wm)) {
13514                         DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13515                                   pipe_name(pipe),
13516                                   sw_plane_wm->trans_wm.plane_en,
13517                                   sw_plane_wm->trans_wm.plane_res_b,
13518                                   sw_plane_wm->trans_wm.plane_res_l,
13519                                   hw_plane_wm->trans_wm.plane_en,
13520                                   hw_plane_wm->trans_wm.plane_res_b,
13521                                   hw_plane_wm->trans_wm.plane_res_l);
13522                 }
13523
13524                 /* DDB */
13525                 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
13526                 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
13527
13528                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13529                         DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
13530                                   pipe_name(pipe),
13531                                   sw_ddb_entry->start, sw_ddb_entry->end,
13532                                   hw_ddb_entry->start, hw_ddb_entry->end);
13533                 }
13534         }
13535
13536         kfree(hw);
13537 }
13538
13539 static void
13540 verify_connector_state(struct intel_atomic_state *state,
13541                        struct intel_crtc *crtc)
13542 {
13543         struct drm_connector *connector;
13544         struct drm_connector_state *new_conn_state;
13545         int i;
13546
13547         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13548                 struct drm_encoder *encoder = connector->encoder;
13549                 struct intel_crtc_state *crtc_state = NULL;
13550
13551                 if (new_conn_state->crtc != &crtc->base)
13552                         continue;
13553
13554                 if (crtc)
13555                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13556
13557                 intel_connector_verify_state(crtc_state, new_conn_state);
13558
13559                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13560                      "connector's atomic encoder doesn't match legacy encoder\n");
13561         }
13562 }
13563
/*
 * Verify every encoder's enabled/active state against the connector
 * states in @state: an encoder must be enabled iff some new connector
 * state points at it, and a detached encoder must be off in hw.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: some connector in @state referenced this encoder
		 * (old or new); enabled: a *new* connector state uses it.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this commit aren't checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* A detached encoder must not still be lit in hw. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
13612
/*
 * Cross check a crtc's software state against what was read back from the
 * hardware. NOTE: @old_crtc_state is destroyed and its memory reused as
 * scratch storage for the read-back hw configuration.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	bool active;

	/* Free the old state, then recycle its memory as a zeroed scratch config */
	state = old_crtc_state->uapi.state;
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);

	pipe_config = old_crtc_state;
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->uapi.crtc = &crtc->base;
	pipe_config->uapi.state = state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Every encoder on the crtc must agree on active state and pipe */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's hw config into the scratch pipe config */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Nothing more to compare when the crtc is supposed to be off */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
13683
13684 static void
13685 intel_verify_planes(struct intel_atomic_state *state)
13686 {
13687         struct intel_plane *plane;
13688         const struct intel_plane_state *plane_state;
13689         int i;
13690
13691         for_each_new_intel_plane_in_state(state, plane,
13692                                           plane_state, i)
13693                 assert_plane(plane, plane_state->planar_slave ||
13694                              plane_state->uapi.visible);
13695 }
13696
/*
 * Cross check the software tracking of one shared DPLL against its
 * hardware state. With a non-NULL @crtc the pll's active/enabled crtc
 * masks are additionally checked against that crtc; with @crtc == NULL
 * only the global refcount consistency is verified.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls may legitimately be on with no sw users tracked */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* No crtc given: only check overall mask consistency */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* An active crtc must appear in the pll's active mask, and vice versa */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* If the pll is on, hw state must match what sw believes was programmed */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13751
13752 static void
13753 verify_shared_dpll_state(struct intel_crtc *crtc,
13754                          struct intel_crtc_state *old_crtc_state,
13755                          struct intel_crtc_state *new_crtc_state)
13756 {
13757         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13758
13759         if (new_crtc_state->shared_dpll)
13760                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13761
13762         if (old_crtc_state->shared_dpll &&
13763             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13764                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13765                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13766
13767                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13768                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13769                                 pipe_name(drm_crtc_index(&crtc->base)));
13770                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13771                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13772                                 pipe_name(drm_crtc_index(&crtc->base)));
13773         }
13774 }
13775
13776 static void
13777 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13778                           struct intel_atomic_state *state,
13779                           struct intel_crtc_state *old_crtc_state,
13780                           struct intel_crtc_state *new_crtc_state)
13781 {
13782         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13783                 return;
13784
13785         verify_wm_state(crtc, new_crtc_state);
13786         verify_connector_state(state, crtc);
13787         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13788         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13789 }
13790
13791 static void
13792 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13793 {
13794         int i;
13795
13796         for (i = 0; i < dev_priv->num_shared_dpll; i++)
13797                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13798 }
13799
13800 static void
13801 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
13802                               struct intel_atomic_state *state)
13803 {
13804         verify_encoder_state(dev_priv, state);
13805         verify_connector_state(state, NULL);
13806         verify_disabled_dpll_state(dev_priv);
13807 }
13808
13809 static void
13810 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
13811 {
13812         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13813         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13814         const struct drm_display_mode *adjusted_mode =
13815                 &crtc_state->hw.adjusted_mode;
13816
13817         drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
13818
13819         /*
13820          * The scanline counter increments at the leading edge of hsync.
13821          *
13822          * On most platforms it starts counting from vtotal-1 on the
13823          * first active line. That means the scanline counter value is
13824          * always one less than what we would expect. Ie. just after
13825          * start of vblank, which also occurs at start of hsync (on the
13826          * last active line), the scanline counter will read vblank_start-1.
13827          *
13828          * On gen2 the scanline counter starts counting from 1 instead
13829          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13830          * to keep the value positive), instead of adding one.
13831          *
13832          * On HSW+ the behaviour of the scanline counter depends on the output
13833          * type. For DP ports it behaves like most other platforms, but on HDMI
13834          * there's an extra 1 line difference. So we need to add two instead of
13835          * one to the value.
13836          *
13837          * On VLV/CHV DSI the scanline counter would appear to increment
13838          * approx. 1/3 of a scanline before start of vblank. Unfortunately
13839          * that means we can't tell whether we're in vblank or not while
13840          * we're on that particular line. We must still set scanline_offset
13841          * to 1 so that the vblank timestamps come out correct when we query
13842          * the scanline counter from within the vblank interrupt handler.
13843          * However if queried just before the start of vblank we'll get an
13844          * answer that's slightly in the future.
13845          */
13846         if (IS_GEN(dev_priv, 2)) {
13847                 int vtotal;
13848
13849                 vtotal = adjusted_mode->crtc_vtotal;
13850                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13851                         vtotal /= 2;
13852
13853                 crtc->scanline_offset = vtotal - 1;
13854         } else if (HAS_DDI(dev_priv) &&
13855                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13856                 crtc->scanline_offset = 2;
13857         } else {
13858                 crtc->scanline_offset = 1;
13859         }
13860 }
13861
13862 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13863 {
13864         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13865         struct intel_crtc_state *new_crtc_state;
13866         struct intel_crtc *crtc;
13867         int i;
13868
13869         if (!dev_priv->display.crtc_compute_clock)
13870                 return;
13871
13872         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13873                 if (!needs_modeset(new_crtc_state))
13874                         continue;
13875
13876                 intel_release_shared_dplls(state, crtc);
13877         }
13878 }
13879
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* Second crtc being enabled found: remember it and stop */
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/*
		 * Pull every crtc into the state so its workaround pipe can
		 * be reset; may return -EDEADLK for lock back-off.
		 */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Exactly one crtc already enabled: the newly enabled pipe waits on
	 * it. Otherwise, when two pipes are enabled together, the second
	 * waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13940
/*
 * Extra check-phase work needed when at least one crtc does a full
 * modeset: recompute the active pipe mask, take the global state lock
 * if the set of active pipes changes, recompute cdclk, release plls of
 * modeset crtcs, and apply the HSW planes workaround.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	state->modeset = true;
	state->active_pipes = dev_priv->active_pipes;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;

	/* Fold the new per-crtc active state into the active pipe mask */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->hw.active)
			state->active_pipes |= BIT(crtc->pipe);
		else
			state->active_pipes &= ~BIT(crtc->pipe);

		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
			state->active_pipe_changes |= BIT(crtc->pipe);
	}

	/* Changing which pipes are active requires the global state lock */
	if (state->active_pipe_changes) {
		ret = intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13985
13986 /*
13987  * Handle calculation of various watermark data at the end of the atomic check
13988  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13989  * handlers to ensure that all derived state has been updated.
13990  */
13991 static int calc_watermark_data(struct intel_atomic_state *state)
13992 {
13993         struct drm_device *dev = state->base.dev;
13994         struct drm_i915_private *dev_priv = to_i915(dev);
13995
13996         /* Is there platform-specific watermark information to calculate? */
13997         if (dev_priv->display.compute_global_watermarks)
13998                 return dev_priv->display.compute_global_watermarks(state);
13999
14000         return 0;
14001 }
14002
14003 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14004                                      struct intel_crtc_state *new_crtc_state)
14005 {
14006         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14007                 return;
14008
14009         new_crtc_state->uapi.mode_changed = false;
14010         new_crtc_state->update_pipe = true;
14011
14012         /*
14013          * If we're not doing the full modeset we want to
14014          * keep the current M/N values as they may be
14015          * sufficiently different to the computed values
14016          * to cause problems.
14017          *
14018          * FIXME: should really copy more fuzzy state here
14019          */
14020         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
14021         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
14022         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
14023         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
14024 }
14025
14026 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14027                                           struct intel_crtc *crtc,
14028                                           u8 plane_ids_mask)
14029 {
14030         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14031         struct intel_plane *plane;
14032
14033         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14034                 struct intel_plane_state *plane_state;
14035
14036                 if ((plane_ids_mask & BIT(plane->id)) == 0)
14037                         continue;
14038
14039                 plane_state = intel_atomic_get_plane_state(state, plane);
14040                 if (IS_ERR(plane_state))
14041                         return PTR_ERR(plane_state);
14042         }
14043
14044         return 0;
14045 }
14046
14047 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14048 {
14049         /* See {hsw,vlv,ivb}_plane_ratio() */
14050         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14051                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14052                 IS_IVYBRIDGE(dev_priv);
14053 }
14054
/*
 * Check-phase handling for planes: run the per-plane checks, fix up
 * NV12 slave planes, pull additional planes into the state where the
 * active plane count affects min cdclk, and finally compute per-plane
 * min cdclk. Sets *need_modeset when a cdclk change forces one.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_modeset)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane doesn't count towards the plane ratio */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);

	return 0;
}
14115
14116 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14117 {
14118         struct intel_crtc_state *crtc_state;
14119         struct intel_crtc *crtc;
14120         int i;
14121
14122         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14123                 int ret = intel_crtc_atomic_check(state, crtc);
14124                 if (ret) {
14125                         DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
14126                                          crtc->base.base.id, crtc->base.name);
14127                         return ret;
14128                 }
14129         }
14130
14131         return 0;
14132 }
14133
14134 /**
14135  * intel_atomic_check - validate state object
14136  * @dev: drm device
14137  * @_state: state to validate
14138  */
14139 static int intel_atomic_check(struct drm_device *dev,
14140                               struct drm_atomic_state *_state)
14141 {
14142         struct drm_i915_private *dev_priv = to_i915(dev);
14143         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14144         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14145         struct intel_crtc *crtc;
14146         int ret, i;
14147         bool any_ms = false;
14148
14149         /* Catch I915_MODE_FLAG_INHERITED */
14150         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14151                                             new_crtc_state, i) {
14152                 if (new_crtc_state->hw.mode.private_flags !=
14153                     old_crtc_state->hw.mode.private_flags)
14154                         new_crtc_state->uapi.mode_changed = true;
14155         }
14156
14157         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14158         if (ret)
14159                 goto fail;
14160
14161         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14162                                             new_crtc_state, i) {
14163                 if (!needs_modeset(new_crtc_state)) {
14164                         /* Light copy */
14165                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14166
14167                         continue;
14168                 }
14169
14170                 if (!new_crtc_state->uapi.enable) {
14171                         intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
14172
14173                         any_ms = true;
14174                         continue;
14175                 }
14176
14177                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14178                 if (ret)
14179                         goto fail;
14180
14181                 ret = intel_modeset_pipe_config(new_crtc_state);
14182                 if (ret)
14183                         goto fail;
14184
14185                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14186
14187                 if (needs_modeset(new_crtc_state))
14188                         any_ms = true;
14189         }
14190
14191         if (any_ms && !check_digital_port_conflicts(state)) {
14192                 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
14193                 ret = EINVAL;
14194                 goto fail;
14195         }
14196
14197         ret = drm_dp_mst_atomic_check(&state->base);
14198         if (ret)
14199                 goto fail;
14200
14201         any_ms |= state->cdclk.force_min_cdclk_changed;
14202
14203         ret = intel_atomic_check_planes(state, &any_ms);
14204         if (ret)
14205                 goto fail;
14206
14207         if (any_ms) {
14208                 ret = intel_modeset_checks(state);
14209                 if (ret)
14210                         goto fail;
14211         } else {
14212                 state->cdclk.logical = dev_priv->cdclk.logical;
14213         }
14214
14215         ret = intel_atomic_check_crtcs(state);
14216         if (ret)
14217                 goto fail;
14218
14219         intel_fbc_choose_crtc(dev_priv, state);
14220         ret = calc_watermark_data(state);
14221         if (ret)
14222                 goto fail;
14223
14224         ret = intel_bw_atomic_check(state);
14225         if (ret)
14226                 goto fail;
14227
14228         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14229                                             new_crtc_state, i) {
14230                 if (!needs_modeset(new_crtc_state) &&
14231                     !new_crtc_state->update_pipe)
14232                         continue;
14233
14234                 intel_dump_pipe_config(new_crtc_state, state,
14235                                        needs_modeset(new_crtc_state) ?
14236                                        "[modeset]" : "[fastset]");
14237         }
14238
14239         return 0;
14240
14241  fail:
14242         if (ret == -EDEADLK)
14243                 return ret;
14244
14245         /*
14246          * FIXME would probably be nice to know which crtc specifically
14247          * caused the failure, in cases where we can pinpoint it.
14248          */
14249         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14250                                             new_crtc_state, i)
14251                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14252
14253         return ret;
14254 }
14255
14256 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
14257 {
14258         return drm_atomic_helper_prepare_planes(state->base.dev,
14259                                                 &state->base);
14260 }
14261
14262 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14263 {
14264         struct drm_device *dev = crtc->base.dev;
14265         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14266
14267         if (!vblank->max_vblank_count)
14268                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14269
14270         return crtc->base.funcs->get_vblank_counter(&crtc->base);
14271 }
14272
14273 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14274                                   struct intel_crtc_state *crtc_state)
14275 {
14276         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14277
14278         if (!IS_GEN(dev_priv, 2))
14279                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14280
14281         if (crtc_state->has_pch_encoder) {
14282                 enum pipe pch_transcoder =
14283                         intel_crtc_pch_transcoder(crtc);
14284
14285                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14286         }
14287 }
14288
/*
 * Apply the pipe-level pieces of a fastset: pipe source size, panel
 * fitter and (gen11+) the pipe chicken register.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Enable, or disable a previously enabled, ironlake pfit */
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
14321
/*
 * Commit the pipe-level configuration and update watermarks. Runs
 * inside the vblank evasion critical section (between
 * intel_pipe_update_start/end in the caller).
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc_state *old_crtc_state,
			       struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		/* Fastset: pipesrc/pfit/chicken register updates */
		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state,
							   new_crtc_state);
}
14352
/*
 * Commit the new state of a single CRTC: either enable it from scratch
 * (full modeset) or fastset it, then update its planes and pipe config
 * under vblank evasion.
 */
static void intel_update_crtc(struct intel_crtc *crtc,
			      struct intel_atomic_state *state,
			      struct intel_crtc_state *old_crtc_state,
			      struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));

	if (modeset) {
		intel_crtc_update_active_timings(new_crtc_state);

		dev_priv->display.crtc_enable(new_crtc_state, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(crtc);
	} else {
		/*
		 * Fastset: when asked to, preload the LUTs ahead of the
		 * color commit done in commit_pipe_config().
		 */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(old_crtc_state, new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	/* FBC must be torn down before the plane update if it has to go. */
	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, old_crtc_state, new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14410
14411 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
14412 {
14413         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
14414         enum transcoder slave_transcoder;
14415
14416         WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
14417
14418         slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
14419         return intel_get_crtc_for_pipe(dev_priv,
14420                                        (enum pipe)slave_transcoder);
14421 }
14422
/*
 * Tear down a CRTC being disabled by this commit: planes off, pipe CRC
 * off, then the pipe itself, FBC and its shared DPLL.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(old_crtc_state, state);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/*
	 * Underruns don't always raise interrupts,
	 * so check manually.
	 */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state,
						     new_crtc_state);
}
14457
/*
 * Disable a transcoder port sync master/slave pair. The slave must be
 * taken down before the master (see intel_commit_modeset_disables()).
 */
static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
						   struct intel_crtc *crtc,
						   struct intel_crtc_state *old_crtc_state,
						   struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
	struct intel_crtc_state *new_slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc_state *old_slave_crtc_state =
		intel_atomic_get_old_crtc_state(state, slave_crtc);

	/* The slave is expected to be part of this atomic state. */
	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
		!old_slave_crtc_state);

	/* Disable Slave first */
	intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
	if (old_slave_crtc_state->hw.active)
		intel_old_crtc_state_disables(state,
					      old_slave_crtc_state,
					      new_slave_crtc_state,
					      slave_crtc);

	/* Disable Master */
	intel_pre_plane_update(old_crtc_state, new_crtc_state);
	if (old_crtc_state->hw.active)
		intel_old_crtc_state_disables(state,
					      old_crtc_state,
					      new_crtc_state,
					      crtc);
}
14488
/*
 * Disable every CRTC that undergoes a full modeset in this commit.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Disable CRTC/pipes in reverse order because some features (MST in
	 * TGL+) require a master and slave relationship between pipes, so it
	 * should always pick the lowest pipe as master as it will be enabled
	 * first and disable in the reverse order so the master will be the
	 * last one to be disabled.
	 */
	for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (is_trans_port_sync_mode(new_crtc_state)) {
			/* The master path disables its slave too. */
			if (is_trans_port_sync_master(new_crtc_state))
				intel_trans_port_sync_modeset_disables(state,
								       crtc,
								       old_crtc_state,
								       new_crtc_state);
			else
				continue;
		} else {
			intel_pre_plane_update(old_crtc_state, new_crtc_state);

			if (old_crtc_state->hw.active)
				intel_old_crtc_state_disables(state,
							      old_crtc_state,
							      new_crtc_state,
							      crtc);
		}
	}
}
14531
14532 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14533 {
14534         struct intel_crtc *crtc;
14535         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14536         int i;
14537
14538         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14539                 if (!new_crtc_state->hw.active)
14540                         continue;
14541
14542                 intel_update_crtc(crtc, state, old_crtc_state,
14543                                   new_crtc_state);
14544         }
14545 }
14546
/*
 * Enable one CRTC of a transcoder port sync pair. Mirrors the modeset
 * branch of intel_update_crtc(); the caller finishes the sequence by
 * moving DP_TP_CTL from Idle to Normal afterwards.
 */
static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
					      struct intel_atomic_state *state,
					      struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	intel_crtc_update_active_timings(new_crtc_state);
	dev_priv->display.crtc_enable(new_crtc_state, state);
	intel_crtc_enable_pipe_crc(crtc);
}
14557
14558 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
14559                                        struct intel_atomic_state *state)
14560 {
14561         struct drm_connector *uninitialized_var(conn);
14562         struct drm_connector_state *conn_state;
14563         struct intel_dp *intel_dp;
14564         int i;
14565
14566         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
14567                 if (conn_state->crtc == &crtc->base)
14568                         break;
14569         }
14570         intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
14571         intel_dp_stop_link_train(intel_dp);
14572 }
14573
/*
 * The plane/pipe updates that intel_update_crtc() would normally do,
 * run after a transcoder port sync CRTC has been fully enabled
 * (including DP_TP_CTL set to Normal). gen11+ only, hence the
 * unconditional skl plane update.
 */
static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
					   struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));
	bool modeset = needs_modeset(new_crtc_state);

	/* FBC must be torn down before the plane update if it has to go. */
	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);
	commit_pipe_config(state, old_crtc_state, new_crtc_state);
	skl_update_planes_on_crtc(state, crtc);
	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14607
/*
 * Enable a transcoder port sync master/slave pair, in the required
 * order: both pipes come up with DP_TP_CTL left in Idle, then the
 * slave's and finally the master's DP_TP_CTL is set to Normal, and
 * only then do the post-enable plane updates run.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
					       struct intel_atomic_state *state,
					       struct intel_crtc_state *old_crtc_state,
					       struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
	struct intel_crtc_state *new_slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc_state *old_slave_crtc_state =
		intel_atomic_get_old_crtc_state(state, slave_crtc);

	/* The slave is expected to be part of this atomic state. */
	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
		!old_slave_crtc_state);

	DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
		      crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
		      slave_crtc->base.name);

	/* Enable seq for slave with DP_TP_CTL left Idle until the
	 * master is ready
	 */
	intel_crtc_enable_trans_port_sync(slave_crtc,
					  state,
					  new_slave_crtc_state);

	/* Enable seq for master with DP_TP_CTL left Idle */
	intel_crtc_enable_trans_port_sync(crtc,
					  state,
					  new_crtc_state);

	/* Set Slave's DP_TP_CTL to Normal */
	intel_set_dp_tp_ctl_normal(slave_crtc,
				   state);

	/* Set Master's DP_TP_CTL To Normal */
	usleep_range(200, 400);
	intel_set_dp_tp_ctl_normal(crtc,
				   state);

	/* Now do the post crtc enable for all master and slaves */
	intel_post_crtc_enable_updates(slave_crtc,
				       state);
	intel_post_crtc_enable_updates(crtc,
				       state);
}
14653
/*
 * gen9+ commit_modeset_enables: pipes must be updated in an order that
 * keeps their DDB allocations from ever overlapping mid-transition,
 * hence the repeated passes over the CRTCs until all have been updated.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned int updated = 0;	/* bitmask of pipes already updated */
	bool progress;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = state->wm_results.ddb.enabled_slices;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->hw.active)
			entries[i] = old_crtc_state->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;
			bool vbl_wait = false;
			bool modeset = needs_modeset(new_crtc_state);

			if (updated & BIT(crtc->pipe) || !new_crtc_state->hw.active)
				continue;

			/*
			 * Defer this pipe to a later pass while its new DDB
			 * range still overlaps another pipe's current one.
			 */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries,
							INTEL_NUM_PIPES(dev_priv), i))
				continue;

			updated |= BIT(pipe);
			entries[i] = new_crtc_state->wm.skl.ddb;

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    !modeset &&
			    state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
				/* The master path updates its slave too. */
				if (is_trans_port_sync_master(new_crtc_state))
					intel_update_trans_port_sync_crtcs(crtc,
									   state,
									   old_crtc_state,
									   new_crtc_state);
				else
					continue;
			} else {
				intel_update_crtc(crtc, state, old_crtc_state,
						  new_crtc_state);
			}

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
14736
/*
 * Drop the reference of every atomic state queued on the driver's
 * free_list (populated from intel_atomic_commit_ready()'s FENCE_FREE
 * notification).
 */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}
14746
/* Work item wrapper around intel_atomic_helper_free_state(). */
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}
14754
/*
 * Wait for the commit's i915_sw_fence to signal, but also wake up when
 * a GPU reset that needs the modeset locks (I915_RESET_MODESET) is
 * flagged, so the commit doesn't block the reset path. Hand-rolled
 * because we wait on two separate wait queues at once.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);

		/* Recheck both conditions after queuing to avoid missed wakeups. */
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
14781
/*
 * Deferred cleanup of a committed atomic state; queued from the end of
 * intel_atomic_commit_tail() so cleanup doesn't add latency to the
 * committing task.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);
	struct drm_i915_private *i915 = to_i915(state->dev);

	drm_atomic_helper_cleanup_planes(&i915->drm, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);

	/* Also drain any states queued for freeing while we're here. */
	intel_atomic_helper_free_state(i915);
}
14794
/*
 * The tail of an atomic commit: performs the actual hardware
 * programming. Runs either inline from intel_atomic_commit() (blocking
 * commits) or from the commit worker (nonblocking commits), after all
 * dependencies and fences have signaled.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/*
	 * Grab the power domains each modeset/fastset CRTC needs for the
	 * duration of the update; released again further down.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(dev_priv,
						 &state->cdclk.actual,
						 &dev_priv->cdclk.actual,
						 state->cdclk.pipe);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(dev_priv,
						  &state->cdclk.actual,
						  &dev_priv->cdclk.actual,
						  state->cdclk.pipe);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	/*
	 * Deferred LUT load for fastsets that did not preload the LUTs
	 * in intel_update_crtc(): program them after the flip is done.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state,
							      new_crtc_state);
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(old_crtc_state);

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (state->modeset)
		intel_verify_planes(state);

	/* Re-enable SAGV if possible (see the disable above). */
	if (state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
14949
/* Work item that runs the commit tail out of line (nonblocking commits). */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);

	intel_atomic_commit_tail(state);
}
14957
14958 static int __i915_sw_fence_call
14959 intel_atomic_commit_ready(struct i915_sw_fence *fence,
14960                           enum i915_sw_fence_notify notify)
14961 {
14962         struct intel_atomic_state *state =
14963                 container_of(fence, struct intel_atomic_state, commit_ready);
14964
14965         switch (notify) {
14966         case FENCE_COMPLETE:
14967                 /* we do blocking waits in the worker, nothing to do here */
14968                 break;
14969         case FENCE_FREE:
14970                 {
14971                         struct intel_atomic_helper *helper =
14972                                 &to_i915(state->base.dev)->atomic_helper;
14973
14974                         if (llist_add(&state->freed, &helper->free_list))
14975                                 schedule_work(&helper->free_work);
14976                         break;
14977                 }
14978         }
14979
14980         return NOTIFY_DONE;
14981 }
14982
/*
 * Move each plane's frontbuffer tracking bits from its old fb to its
 * new fb as part of committing the state.
 */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}
14995
/*
 * Assert that every CRTC mutex is held; required before touching
 * global (non-per-CRTC) display state.
 */
static void assert_global_state_locked(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}
15003
/*
 * Top-level atomic commit implementation.
 *
 * Prepares the commit (fb pinning, fences), swaps the new state into
 * place, and then either runs intel_atomic_commit_tail() inline
 * (blocking commit) or queues it on the modeset/flip workqueue
 * (nonblocking commit).
 *
 * Returns 0 on success or a negative error code; on failure the
 * references taken here (sw fence, rpm wakeref) are released again
 * before returning.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Keep the device awake across the commit; the tail/error paths put it. */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);

	if (ret) {
		/* Still commit the fence so waiters on it are not stuck. */
		i915_sw_fence_commit(&state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/*
	 * Copy the new global (not per-crtc) state into dev_priv; this
	 * is only safe with every crtc lock held, which is asserted.
	 */
	if (state->global_state_changed) {
		assert_global_state_locked(dev_priv);

		memcpy(dev_priv->min_cdclk, state->min_cdclk,
		       sizeof(state->min_cdclk));
		memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
		       sizeof(state->min_voltage_level));
		dev_priv->active_pipes = state->active_pipes;
		dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;

		intel_cdclk_swap_state(state);
	}

	/* Extra reference for the commit work; presumably dropped by the tail. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/*
		 * Blocking commit: drain any queued modesets before
		 * running the tail inline.
		 */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
15098
/*
 * A one-shot waiter queued on a crtc's vblank waitqueue, used to boost
 * GPU frequency if @request hasn't started by the time the vblank fires
 * (see add_rps_boost_after_vblank() / do_rps_boost()).
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the vblank waitqueue */

	struct drm_crtc *crtc;		/* crtc we hold a vblank reference on */
	struct i915_request *request;	/* request to boost; holds a reference */
};
15105
/*
 * Vblank waitqueue callback armed by add_rps_boost_after_vblank().
 * Boosts the request's frequency if it hasn't started running yet,
 * drops the request and vblank references taken when armed, and
 * removes/frees itself (one-shot).
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	/* Balance the drm_crtc_vblank_get() from add_rps_boost_after_vblank(). */
	drm_crtc_vblank_put(wait->crtc);

	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
15127
/*
 * Arm a one-shot RPS boost (do_rps_boost()) to fire on @crtc's next
 * vblank for the request backing @fence.  Silently does nothing for
 * non-i915 fences, gen < 6, when a vblank reference can't be taken, or
 * on allocation failure — the boost is only a performance hint.
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	if (!dma_fence_is_i915(fence))
		return;

	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
		return;

	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		/* Undo the vblank reference taken above. */
		drm_crtc_vblank_put(crtc);
		return;
	}

	/* References released by do_rps_boost() when the vblank fires. */
	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
15156
/*
 * Pin (and fence, when used) the framebuffer backing @plane_state for
 * scanout.  Cursor planes on platforms with cursor_needs_physical are
 * first attached to physical memory.  The resulting vma is stored in
 * plane_state->vma for intel_plane_unpin_fb().
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	plane_state->vma = vma;

	return 0;
}
15186
15187 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15188 {
15189         struct i915_vma *vma;
15190
15191         vma = fetch_and_zero(&old_plane_state->vma);
15192         if (vma)
15193                 intel_unpin_fb_vma(vma, old_plane_state->flags);
15194 }
15195
/*
 * Raise the scheduling priority of outstanding work against a
 * to-be-displayed object to I915_PRIORITY_DISPLAY, so rendering to the
 * new scanout buffer is preferred.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
15204
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @_new_plane_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.  Also hooks the commit's sw fence up to the relevant
 * explicit/implicit fences and arms an RPS boost for the flip.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *_new_plane_state)
{
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_plane_state->hw.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (old_obj) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(intel_state,
							to_intel_crtc(plane->state->crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->base.resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_plane_state->uapi.fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_plane_state->uapi.fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	/* No new fb (e.g. plane being disabled): nothing to pin. */
	if (!obj)
		return 0;

	/* Keep the object's pages pinned across the fb pin below. */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = intel_plane_pin_fb(new_plane_state);

	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	/* Prefer rendering to the new scanout, and flush the frontbuffer. */
	fb_obj_bump_render_priority(obj);
	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);

	if (!new_plane_state->uapi.fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->base.resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		/* Boost against the exclusive (write) fence, if any. */
		fence = dma_resv_get_excl_rcu(obj->base.resv);
		if (fence) {
			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
						   fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
					   new_plane_state->uapi.fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!intel_state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
		intel_state->rps_interactive = true;
	}

	return 0;
}
15318
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @_old_plane_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *_old_plane_state)
{
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(_old_plane_state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(old_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);

	/* Drop the interactive RPS bias taken in intel_prepare_plane_fb(). */
	if (intel_state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
		intel_state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	intel_plane_unpin_fb(old_plane_state);
}
15344
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).  Tears down the drm core state and frees the embedding
 * intel_plane.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
15357
15358 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15359                                             u32 format, u64 modifier)
15360 {
15361         switch (modifier) {
15362         case DRM_FORMAT_MOD_LINEAR:
15363         case I915_FORMAT_MOD_X_TILED:
15364                 break;
15365         default:
15366                 return false;
15367         }
15368
15369         switch (format) {
15370         case DRM_FORMAT_C8:
15371         case DRM_FORMAT_RGB565:
15372         case DRM_FORMAT_XRGB1555:
15373         case DRM_FORMAT_XRGB8888:
15374                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15375                         modifier == I915_FORMAT_MOD_X_TILED;
15376         default:
15377                 return false;
15378         }
15379 }
15380
15381 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15382                                             u32 format, u64 modifier)
15383 {
15384         switch (modifier) {
15385         case DRM_FORMAT_MOD_LINEAR:
15386         case I915_FORMAT_MOD_X_TILED:
15387                 break;
15388         default:
15389                 return false;
15390         }
15391
15392         switch (format) {
15393         case DRM_FORMAT_C8:
15394         case DRM_FORMAT_RGB565:
15395         case DRM_FORMAT_XRGB8888:
15396         case DRM_FORMAT_XBGR8888:
15397         case DRM_FORMAT_ARGB8888:
15398         case DRM_FORMAT_ABGR8888:
15399         case DRM_FORMAT_XRGB2101010:
15400         case DRM_FORMAT_XBGR2101010:
15401         case DRM_FORMAT_ARGB2101010:
15402         case DRM_FORMAT_ABGR2101010:
15403         case DRM_FORMAT_XBGR16161616F:
15404                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15405                         modifier == I915_FORMAT_MOD_X_TILED;
15406         default:
15407                 return false;
15408         }
15409 }
15410
15411 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
15412                                               u32 format, u64 modifier)
15413 {
15414         return modifier == DRM_FORMAT_MOD_LINEAR &&
15415                 format == DRM_FORMAT_ARGB8888;
15416 }
15417
/* drm_plane_funcs for gen4+ primary planes (atomic helpers throughout). */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
15426
/* drm_plane_funcs for gen2/3 primary planes; differs from i965 only in
 * the supported format/modifier check. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
15435
/*
 * Legacy cursor update fastpath: service the cursor ioctls without a
 * full atomic commit (and hence without vblank waits) when only the fb
 * or position changes.  Anything that might affect watermarks, race
 * with a pending commit, or hit an inactive/modesetting crtc falls
 * back to the regular drm_atomic_helper_update_plane() slowpath.
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	/* Build throwaway plane/crtc states for the check below. */
	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/*
	 * The temporary crtc state is always destroyed; on failure the
	 * new plane state dies, on success the old (now replaced) one.
	 */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
15558
/* drm_plane_funcs for cursor planes; .update_plane goes through the
 * legacy cursor fastpath instead of the plain atomic helper. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
15567
15568 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
15569                                enum i9xx_plane_id i9xx_plane)
15570 {
15571         if (!HAS_FBC(dev_priv))
15572                 return false;
15573
15574         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15575                 return i9xx_plane == PLANE_A; /* tied to pipe A */
15576         else if (IS_IVYBRIDGE(dev_priv))
15577                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
15578                         i9xx_plane == PLANE_C;
15579         else if (INTEL_GEN(dev_priv) >= 4)
15580                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
15581         else
15582                 return i9xx_plane == PLANE_A;
15583 }
15584
/*
 * Create and register the primary plane for @pipe on pre-gen9
 * hardware; gen9+ is delegated to skl_universal_plane_create().
 * Selects the format list, plane funcs and hw vfuncs based on the
 * platform generation.
 *
 * Returns the new plane or an ERR_PTR() on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	/* Register this plane's frontbuffer bit with the FBC code. */
	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pick the pixel format list for this platform. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	/* Per-platform minimum cdclk calculation for this plane. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	possible_crtcs = BIT(pipe);

	/*
	 * Name follows the pipe on platforms with a fixed plane->pipe
	 * mapping ("primary A"), otherwise the hw plane ("plane A").
	 */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary plane always sits at the bottom of the zpos stack. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
15720
/*
 * Create and register the cursor plane for @pipe.
 *
 * Picks the i845/i865 cursor hooks on those platforms and the generic
 * i9xx hooks everywhere else, registers the plane with the DRM core,
 * and attaches the rotation (gen4+) and immutable zpos properties.
 *
 * Returns the new plane, or an ERR_PTR() on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* ~0 marks the cached register values as not yet known */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	/* the cursor plane is tied to exactly one pipe */
	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* cursor sits above the primary and all sprite planes */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
15788
15789 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
15790                                     struct intel_crtc_state *crtc_state)
15791 {
15792         struct intel_crtc_scaler_state *scaler_state =
15793                 &crtc_state->scaler_state;
15794         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15795         int i;
15796
15797         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
15798         if (!crtc->num_scalers)
15799                 return;
15800
15801         for (i = 0; i < crtc->num_scalers; i++) {
15802                 struct intel_scaler *scaler = &scaler_state->scalers[i];
15803
15804                 scaler->in_use = 0;
15805                 scaler->mode = 0;
15806         }
15807
15808         scaler_state->scaler_id = -1;
15809 }
15810
/*
 * Funcs common to every crtc regardless of platform; the tables below
 * only add the platform-specific vblank counter/enable hooks on top.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources

/* gen8+ (non-GMCH), see the funcs selection in intel_crtc_init() */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};

/* pre-gen8 PCH platforms */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};

/* G4X/VLV/CHV */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

/* other gen4 */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

/* i915gm/i945gm */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
};

/* other gen3 */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};

/* gen2 */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
15877
/*
 * Allocate and register the crtc for @pipe.
 *
 * Allocates the intel_crtc and its initial state, creates the primary,
 * sprite and cursor planes for the pipe, selects the vblank hooks that
 * match the platform, registers the crtc with the DRM core and records
 * it in the pipe (and, pre-gen9, plane) to crtc mappings.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	__drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->uapi);
	intel_crtc->config = crtc_state;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	/* pick the vblank counter/enable hooks for this platform */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	/* pre-gen9 the primary planes have a fixed plane<->crtc mapping */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	intel_color_init(intel_crtc);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
15982
15983 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15984                                       struct drm_file *file)
15985 {
15986         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15987         struct drm_crtc *drmmode_crtc;
15988         struct intel_crtc *crtc;
15989
15990         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15991         if (!drmmode_crtc)
15992                 return -ENOENT;
15993
15994         crtc = to_intel_crtc(drmmode_crtc);
15995         pipe_from_crtc_id->pipe = crtc->pipe;
15996
15997         return 0;
15998 }
15999
16000 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16001 {
16002         struct drm_device *dev = encoder->base.dev;
16003         struct intel_encoder *source_encoder;
16004         u32 possible_clones = 0;
16005
16006         for_each_intel_encoder(dev, source_encoder) {
16007                 if (encoders_cloneable(encoder, source_encoder))
16008                         possible_clones |= drm_encoder_mask(&source_encoder->base);
16009         }
16010
16011         return possible_clones;
16012 }
16013
16014 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16015 {
16016         struct drm_device *dev = encoder->base.dev;
16017         struct intel_crtc *crtc;
16018         u32 possible_crtcs = 0;
16019
16020         for_each_intel_crtc(dev, crtc) {
16021                 if (encoder->pipe_mask & BIT(crtc->pipe))
16022                         possible_crtcs |= drm_crtc_mask(&crtc->base);
16023         }
16024
16025         return possible_crtcs;
16026 }
16027
16028 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16029 {
16030         if (!IS_MOBILE(dev_priv))
16031                 return false;
16032
16033         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
16034                 return false;
16035
16036         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
16037                 return false;
16038
16039         return true;
16040 }
16041
/*
 * Whether a DDI platform (HSW/BDW/SKL) has the analog CRT output
 * (on DDI E) hooked up. Checks hardware straps and the VBT.
 */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	/* never present on gen9+ */
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	/* NOTE(review): ULT parts apparently lack DDI E — confirm w/ spec */
	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
16063
16064 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16065 {
16066         int pps_num;
16067         int pps_idx;
16068
16069         if (HAS_DDI(dev_priv))
16070                 return;
16071         /*
16072          * This w/a is needed at least on CPT/PPT, but to be sure apply it
16073          * everywhere where registers can be write protected.
16074          */
16075         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16076                 pps_num = 2;
16077         else
16078                 pps_num = 1;
16079
16080         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16081                 u32 val = I915_READ(PP_CONTROL(pps_idx));
16082
16083                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16084                 I915_WRITE(PP_CONTROL(pps_idx), val);
16085         }
16086 }
16087
/*
 * Select the panel power sequencer register block for the platform
 * and apply the PPS register unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
16099
/*
 * Probe and register all display outputs (encoders/connectors) for the
 * platform: one branch per hardware family, each consulting the detect
 * straps and/or the VBT as appropriate. Finally computes each encoder's
 * possible_crtcs/possible_clones masks and the PCH reference clock.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 12) {
		/* NOTE(review): port C intentionally absent here — confirm */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* all encoders exist now; fill in their routing/cloning masks */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
16338
16339 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16340 {
16341         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16342
16343         drm_framebuffer_cleanup(fb);
16344         intel_frontbuffer_put(intel_fb->frontbuffer);
16345
16346         kfree(intel_fb);
16347 }
16348
16349 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16350                                                 struct drm_file *file,
16351                                                 unsigned int *handle)
16352 {
16353         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16354
16355         if (obj->userptr.mm) {
16356                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
16357                 return -EINVAL;
16358         }
16359
16360         return drm_gem_handle_create(file, &obj->base, handle);
16361 }
16362
16363 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
16364                                         struct drm_file *file,
16365                                         unsigned flags, unsigned color,
16366                                         struct drm_clip_rect *clips,
16367                                         unsigned num_clips)
16368 {
16369         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16370
16371         i915_gem_object_flush_if_display(obj);
16372         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
16373
16374         return 0;
16375 }
16376
/* vtable for framebuffers created through the addfb2 path below */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
16382
/*
 * Validate @mode_cmd against @obj and the hardware, and initialize
 * @intel_fb around them: checks modifier vs. tiling, pixel format,
 * stride limits and per-plane alignment, then registers the fb with
 * the DRM core. Takes a frontbuffer reference which is dropped again
 * on any failure. Returns 0 or a negative error code.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* snapshot the object's tiling/stride under the object lock */
	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* no explicit modifiers: derive them from the tiling mode */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* all color planes must share the single GEM object */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);

		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
		    is_ccs_modifier(fb->modifier))
			stride_alignment *= 4;

		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* drop the frontbuffer reference taken at the top */
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
16518
16519 static struct drm_framebuffer *
16520 intel_user_framebuffer_create(struct drm_device *dev,
16521                               struct drm_file *filp,
16522                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
16523 {
16524         struct drm_framebuffer *fb;
16525         struct drm_i915_gem_object *obj;
16526         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
16527
16528         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
16529         if (!obj)
16530                 return ERR_PTR(-ENOENT);
16531
16532         fb = intel_framebuffer_create(obj, &mode_cmd);
16533         i915_gem_object_put(obj);
16534
16535         return fb;
16536 }
16537
16538 static void intel_atomic_state_free(struct drm_atomic_state *state)
16539 {
16540         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
16541
16542         drm_atomic_state_default_release(state);
16543
16544         i915_sw_fence_fini(&intel_state->commit_ready);
16545
16546         kfree(state);
16547 }
16548
16549 static enum drm_mode_status
16550 intel_mode_valid(struct drm_device *dev,
16551                  const struct drm_display_mode *mode)
16552 {
16553         struct drm_i915_private *dev_priv = to_i915(dev);
16554         int hdisplay_max, htotal_max;
16555         int vdisplay_max, vtotal_max;
16556
16557         /*
16558          * Can't reject DBLSCAN here because Xorg ddxen can add piles
16559          * of DBLSCAN modes to the output's mode list when they detect
16560          * the scaling mode property on the connector. And they don't
16561          * ask the kernel to validate those modes in any way until
16562          * modeset time at which point the client gets a protocol error.
16563          * So in order to not upset those clients we silently ignore the
16564          * DBLSCAN flag on such connectors. For other connectors we will
16565          * reject modes with the DBLSCAN flag in encoder->compute_config().
16566          * And we always reject DBLSCAN modes in connector->mode_valid()
16567          * as we never want such modes on the connector's mode list.
16568          */
16569
16570         if (mode->vscan > 1)
16571                 return MODE_NO_VSCAN;
16572
16573         if (mode->flags & DRM_MODE_FLAG_HSKEW)
16574                 return MODE_H_ILLEGAL;
16575
16576         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
16577                            DRM_MODE_FLAG_NCSYNC |
16578                            DRM_MODE_FLAG_PCSYNC))
16579                 return MODE_HSYNC;
16580
16581         if (mode->flags & (DRM_MODE_FLAG_BCAST |
16582                            DRM_MODE_FLAG_PIXMUX |
16583                            DRM_MODE_FLAG_CLKDIV2))
16584                 return MODE_BAD;
16585
16586         /* Transcoder timing limits */
16587         if (INTEL_GEN(dev_priv) >= 11) {
16588                 hdisplay_max = 16384;
16589                 vdisplay_max = 8192;
16590                 htotal_max = 16384;
16591                 vtotal_max = 8192;
16592         } else if (INTEL_GEN(dev_priv) >= 9 ||
16593                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
16594                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
16595                 vdisplay_max = 4096;
16596                 htotal_max = 8192;
16597                 vtotal_max = 8192;
16598         } else if (INTEL_GEN(dev_priv) >= 3) {
16599                 hdisplay_max = 4096;
16600                 vdisplay_max = 4096;
16601                 htotal_max = 8192;
16602                 vtotal_max = 8192;
16603         } else {
16604                 hdisplay_max = 2048;
16605                 vdisplay_max = 2048;
16606                 htotal_max = 4096;
16607                 vtotal_max = 4096;
16608         }
16609
16610         if (mode->hdisplay > hdisplay_max ||
16611             mode->hsync_start > htotal_max ||
16612             mode->hsync_end > htotal_max ||
16613             mode->htotal > htotal_max)
16614                 return MODE_H_ILLEGAL;
16615
16616         if (mode->vdisplay > vdisplay_max ||
16617             mode->vsync_start > vtotal_max ||
16618             mode->vsync_end > vtotal_max ||
16619             mode->vtotal > vtotal_max)
16620                 return MODE_V_ILLEGAL;
16621
16622         if (INTEL_GEN(dev_priv) >= 5) {
16623                 if (mode->hdisplay < 64 ||
16624                     mode->htotal - mode->hdisplay < 32)
16625                         return MODE_H_ILLEGAL;
16626
16627                 if (mode->vtotal - mode->vdisplay < 5)
16628                         return MODE_V_ILLEGAL;
16629         } else {
16630                 if (mode->htotal - mode->hdisplay < 32)
16631                         return MODE_H_ILLEGAL;
16632
16633                 if (mode->vtotal - mode->vdisplay < 3)
16634                         return MODE_V_ILLEGAL;
16635         }
16636
16637         return MODE_OK;
16638 }
16639
16640 enum drm_mode_status
16641 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16642                                 const struct drm_display_mode *mode)
16643 {
16644         int plane_width_max, plane_height_max;
16645
16646         /*
16647          * intel_mode_valid() should be
16648          * sufficient on older platforms.
16649          */
16650         if (INTEL_GEN(dev_priv) < 9)
16651                 return MODE_OK;
16652
16653         /*
16654          * Most people will probably want a fullscreen
16655          * plane so let's not advertize modes that are
16656          * too big for that.
16657          */
16658         if (INTEL_GEN(dev_priv) >= 11) {
16659                 plane_width_max = 5120;
16660                 plane_height_max = 4320;
16661         } else {
16662                 plane_width_max = 5120;
16663                 plane_height_max = 4096;
16664         }
16665
16666         if (mode->hdisplay > plane_width_max)
16667                 return MODE_H_ILLEGAL;
16668
16669         if (mode->vdisplay > plane_height_max)
16670                 return MODE_V_ILLEGAL;
16671
16672         return MODE_OK;
16673 }
16674
/*
 * i915 mode config vtable: routes DRM core framebuffer creation,
 * output polling, mode validation and the atomic check/commit/state
 * entry points to the i915 implementations above.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .get_format_info = intel_get_format_info,
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .mode_valid = intel_mode_valid,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
        .atomic_state_free = intel_atomic_state_free,
};
16686
16687 /**
16688  * intel_init_display_hooks - initialize the display modesetting hooks
16689  * @dev_priv: device private
16690  */
16691 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
16692 {
16693         intel_init_cdclk_hooks(dev_priv);
16694
16695         if (INTEL_GEN(dev_priv) >= 9) {
16696                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16697                 dev_priv->display.get_initial_plane_config =
16698                         skylake_get_initial_plane_config;
16699                 dev_priv->display.crtc_compute_clock =
16700                         haswell_crtc_compute_clock;
16701                 dev_priv->display.crtc_enable = haswell_crtc_enable;
16702                 dev_priv->display.crtc_disable = haswell_crtc_disable;
16703         } else if (HAS_DDI(dev_priv)) {
16704                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16705                 dev_priv->display.get_initial_plane_config =
16706                         i9xx_get_initial_plane_config;
16707                 dev_priv->display.crtc_compute_clock =
16708                         haswell_crtc_compute_clock;
16709                 dev_priv->display.crtc_enable = haswell_crtc_enable;
16710                 dev_priv->display.crtc_disable = haswell_crtc_disable;
16711         } else if (HAS_PCH_SPLIT(dev_priv)) {
16712                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
16713                 dev_priv->display.get_initial_plane_config =
16714                         i9xx_get_initial_plane_config;
16715                 dev_priv->display.crtc_compute_clock =
16716                         ironlake_crtc_compute_clock;
16717                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
16718                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
16719         } else if (IS_CHERRYVIEW(dev_priv)) {
16720                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16721                 dev_priv->display.get_initial_plane_config =
16722                         i9xx_get_initial_plane_config;
16723                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
16724                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16725                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16726         } else if (IS_VALLEYVIEW(dev_priv)) {
16727                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16728                 dev_priv->display.get_initial_plane_config =
16729                         i9xx_get_initial_plane_config;
16730                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
16731                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16732                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16733         } else if (IS_G4X(dev_priv)) {
16734                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16735                 dev_priv->display.get_initial_plane_config =
16736                         i9xx_get_initial_plane_config;
16737                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
16738                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16739                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16740         } else if (IS_PINEVIEW(dev_priv)) {
16741                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16742                 dev_priv->display.get_initial_plane_config =
16743                         i9xx_get_initial_plane_config;
16744                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
16745                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16746                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16747         } else if (!IS_GEN(dev_priv, 2)) {
16748                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16749                 dev_priv->display.get_initial_plane_config =
16750                         i9xx_get_initial_plane_config;
16751                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
16752                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16753                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16754         } else {
16755                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16756                 dev_priv->display.get_initial_plane_config =
16757                         i9xx_get_initial_plane_config;
16758                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
16759                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16760                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16761         }
16762
16763         if (IS_GEN(dev_priv, 5)) {
16764                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
16765         } else if (IS_GEN(dev_priv, 6)) {
16766                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
16767         } else if (IS_IVYBRIDGE(dev_priv)) {
16768                 /* FIXME: detect B0+ stepping and use auto training */
16769                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
16770         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
16771                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
16772         }
16773
16774         if (INTEL_GEN(dev_priv) >= 9)
16775                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
16776         else
16777                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
16778
16779 }
16780
/*
 * Read out the current cdclk hardware state and seed the software
 * logical/actual cdclk copies from it, so software state starts out
 * matching whatever the hardware is running.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
        intel_update_cdclk(i915);
        intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
        i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
16787
16788 /*
16789  * Calculate what we think the watermarks should be for the state we've read
16790  * out of the hardware and then immediately program those watermarks so that
16791  * we ensure the hardware settings match our internal state.
16792  *
16793  * We can calculate what we think WM's should be by creating a duplicate of the
16794  * current state (which was constructed during hardware readout) and running it
16795  * through the atomic check code to calculate new watermark values in the
16796  * state object.
16797  */
static void sanitize_watermarks(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state;
        struct intel_atomic_state *intel_state;
        struct intel_crtc *crtc;
        struct intel_crtc_state *crtc_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
        int i;

        /* Only supported on platforms that use atomic watermark design */
        if (!dev_priv->display.optimize_watermarks)
                return;

        /*
         * We need to hold connection_mutex before calling duplicate_state so
         * that the connector loop is protected.
         */
        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock_all_ctx(dev, &ctx);
        if (ret == -EDEADLK) {
                /* Lock contention: back off and retry the whole lock grab. */
                drm_modeset_backoff(&ctx);
                goto retry;
        } else if (WARN_ON(ret)) {
                goto fail;
        }

        /* Duplicate the state we just read out of the hardware. */
        state = drm_atomic_helper_duplicate_state(dev, &ctx);
        if (WARN_ON(IS_ERR(state)))
                goto fail;

        intel_state = to_intel_atomic_state(state);

        /*
         * Hardware readout is the only time we don't want to calculate
         * intermediate watermarks (since we don't trust the current
         * watermarks).
         */
        if (!HAS_GMCH(dev_priv))
                intel_state->skip_intermediate_wm = true;

        /* Run the duplicated state through atomic check to compute WMs. */
        ret = intel_atomic_check(dev, state);
        if (ret) {
                /*
                 * If we fail here, it means that the hardware appears to be
                 * programmed in a way that shouldn't be possible, given our
                 * understanding of watermark requirements.  This might mean a
                 * mistake in the hardware readout code or a mistake in the
                 * watermark calculations for a given platform.  Raise a WARN
                 * so that this is noticeable.
                 *
                 * If this actually happens, we'll have to just leave the
                 * BIOS-programmed watermarks untouched and hope for the best.
                 */
                WARN(true, "Could not determine valid watermarks for inherited state\n");
                goto put_state;
        }

        /* Write calculated watermark values back */
        for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
                crtc_state->wm.need_postvbl_update = true;
                dev_priv->display.optimize_watermarks(intel_state, crtc_state);

                /* Mirror the computed WMs into the committed crtc state. */
                to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
        }

put_state:
        drm_atomic_state_put(state);
fail:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
}
16872
16873 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16874 {
16875         if (IS_GEN(dev_priv, 5)) {
16876                 u32 fdi_pll_clk =
16877                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16878
16879                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16880         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16881                 dev_priv->fdi_pll_freq = 270000;
16882         } else {
16883                 return;
16884         }
16885
16886         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16887 }
16888
/*
 * Commit the state inherited from the BIOS once at init time so that
 * all active planes recompute their software state before the first
 * userspace modeset. Returns 0 on success or a negative errno.
 */
static int intel_initial_commit(struct drm_device *dev)
{
        struct drm_atomic_state *state = NULL;
        struct drm_modeset_acquire_ctx ctx;
        struct intel_crtc *crtc;
        int ret = 0;

        state = drm_atomic_state_alloc(dev);
        if (!state)
                return -ENOMEM;

        drm_modeset_acquire_init(&ctx, 0);

retry:
        state->acquire_ctx = &ctx;

        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        intel_atomic_get_crtc_state(state, crtc);

                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        goto out;
                }

                if (crtc_state->hw.active) {
                        /* Pull all planes of active crtcs into the commit. */
                        ret = drm_atomic_add_affected_planes(state, &crtc->base);
                        if (ret)
                                goto out;

                        /*
                         * FIXME hack to force a LUT update to avoid the
                         * plane update forcing the pipe gamma on without
                         * having a proper LUT loaded. Remove once we
                         * have readout for pipe gamma enable.
                         */
                        crtc_state->uapi.color_mgmt_changed = true;
                }
        }

        ret = drm_atomic_commit(state);

out:
        if (ret == -EDEADLK) {
                /* Lock contention: wipe the state and redo from the top. */
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        drm_atomic_state_put(state);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}
16945
16946 static void intel_mode_config_init(struct drm_i915_private *i915)
16947 {
16948         struct drm_mode_config *mode_config = &i915->drm.mode_config;
16949
16950         drm_mode_config_init(&i915->drm);
16951
16952         mode_config->min_width = 0;
16953         mode_config->min_height = 0;
16954
16955         mode_config->preferred_depth = 24;
16956         mode_config->prefer_shadow = 1;
16957
16958         mode_config->allow_fb_modifiers = true;
16959
16960         mode_config->funcs = &intel_mode_funcs;
16961
16962         /*
16963          * Maximum framebuffer dimensions, chosen to match
16964          * the maximum render engine surface size on gen4+.
16965          */
16966         if (INTEL_GEN(i915) >= 7) {
16967                 mode_config->max_width = 16384;
16968                 mode_config->max_height = 16384;
16969         } else if (INTEL_GEN(i915) >= 4) {
16970                 mode_config->max_width = 8192;
16971                 mode_config->max_height = 8192;
16972         } else if (IS_GEN(i915, 3)) {
16973                 mode_config->max_width = 4096;
16974                 mode_config->max_height = 4096;
16975         } else {
16976                 mode_config->max_width = 2048;
16977                 mode_config->max_height = 2048;
16978         }
16979
16980         if (IS_I845G(i915) || IS_I865G(i915)) {
16981                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
16982                 mode_config->cursor_height = 1023;
16983         } else if (IS_GEN(i915, 2)) {
16984                 mode_config->cursor_width = 64;
16985                 mode_config->cursor_height = 64;
16986         } else {
16987                 mode_config->cursor_width = 256;
16988                 mode_config->cursor_height = 256;
16989         }
16990 }
16991
/*
 * One-time display subsystem initialization: mode config, workqueues,
 * PM/watermark setup, crtc and output creation, then takeover of the
 * state the BIOS left the hardware in. Returns 0 or a negative errno.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
        struct drm_device *dev = &i915->drm;
        enum pipe pipe;
        struct intel_crtc *crtc;
        int ret;

        /*
         * Ordered wq for modesets, high-prio unbound wq for flips.
         * NOTE(review): allocation results are not checked here — confirm
         * whether a NULL workqueue is tolerated downstream.
         */
        i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
        i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
                                        WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

        intel_mode_config_init(i915);

        ret = intel_bw_init(i915);
        if (ret)
                /*
                 * NOTE(review): the workqueues and mode config created above
                 * are not torn down on this error path — verify the caller
                 * handles that, or that teardown happens in driver unload.
                 */
                return ret;

        init_llist_head(&i915->atomic_helper.free_list);
        INIT_WORK(&i915->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);

        intel_init_quirks(i915);

        intel_fbc_init(i915);

        intel_init_pm(i915);

        intel_panel_sanitize_ssc(i915);

        intel_gmbus_setup(i915);

        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_NUM_PIPES(i915),
                      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

        /* Create one crtc per hardware pipe, if the display is usable. */
        if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
                for_each_pipe(i915, pipe) {
                        ret = intel_crtc_init(i915, pipe);
                        if (ret) {
                                drm_mode_config_cleanup(dev);
                                return ret;
                        }
                }
        }

        intel_shared_dpll_init(dev);
        intel_update_fdi_pll_freq(i915);

        intel_update_czclk(i915);
        intel_modeset_init_hw(i915);

        intel_hdcp_component_init(i915);

        if (i915->max_cdclk_freq == 0)
                intel_update_max_cdclk(i915);

        /* Just disable it once at startup */
        intel_vga_disable(i915);
        intel_setup_outputs(i915);

        /* Read out the display state the firmware programmed. */
        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                struct intel_initial_plane_config plane_config = {};

                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top.  This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                i915->display.get_initial_plane_config(crtc, &plane_config);

                /*
                 * If the fb is shared between multiple heads, we'll
                 * just get the first one.
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        if (!HAS_GMCH(i915))
                sanitize_watermarks(dev);

        /*
         * Force all active planes to recompute their states. So that on
         * mode_setcrtc after probe, all the intel_plane_state variables
         * are already calculated and there is no assert_plane warnings
         * during bootup.
         */
        ret = intel_initial_commit(dev);
        if (ret)
                DRM_DEBUG_KMS("Initial commit in probe failed.\n");

        return 0;
}
17098
/*
 * Force-enable a pipe with fixed 640x480@60 timings (force quirk
 * path), programming the DPLL and pipe timing registers directly
 * instead of going through the normal modeset machinery.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        /* 640x480@60Hz, ~25175 kHz */
        struct dpll clock = {
                .m1 = 18,
                .m2 = 7,
                .p1 = 13,
                .p2 = 4,
                .n = 2,
        };
        u32 dpll, fp;
        int i;

        /* With a 48000 kHz refclk these dividers must yield a 25154 kHz dot clock. */
        WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

        DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
                      pipe_name(pipe), clock.vco, clock.dot);

        fp = i9xx_dpll_compute_fp(&clock);
        dpll = DPLL_DVO_2X_MODE |
                DPLL_VGA_MODE_DIS |
                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
                PLL_P2_DIVIDE_BY_4 |
                PLL_REF_INPUT_DREFCLK |
                DPLL_VCO_ENABLE;

        I915_WRITE(FP0(pipe), fp);
        I915_WRITE(FP1(pipe), fp);

        /* Standard 640x480@60 CRT timings. */
        I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
        I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
        I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(DPLL(pipe), dpll);

        /* We do this three times for luck */
        for (i = 0; i < 3 ; i++) {
                I915_WRITE(DPLL(pipe), dpll);
                POSTING_READ(DPLL(pipe));
                udelay(150); /* wait for warmup */
        }

        I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
        POSTING_READ(PIPECONF(pipe));

        /* Make sure the pipe has actually started scanning out. */
        intel_wait_for_pipe_scanline_moving(crtc);
}
17168
/*
 * Force-disable a pipe that was enabled via the force quirk, after
 * asserting that no planes or cursors are still active on it.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
                      pipe_name(pipe));

        /* All display planes and cursors must already be off. */
        WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
        WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));

        /* Wait until the pipe has actually stopped scanning out. */
        intel_wait_for_pipe_scanline_stopped(crtc);

        /* Shut down the DPLL, keeping VGA mode disabled. */
        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
17190
17191 static void
17192 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17193 {
17194         struct intel_crtc *crtc;
17195
17196         if (INTEL_GEN(dev_priv) >= 4)
17197                 return;
17198
17199         for_each_intel_crtc(&dev_priv->drm, crtc) {
17200                 struct intel_plane *plane =
17201                         to_intel_plane(crtc->base.primary);
17202                 struct intel_crtc *plane_crtc;
17203                 enum pipe pipe;
17204
17205                 if (!plane->get_hw_state(plane, &pipe))
17206                         continue;
17207
17208                 if (pipe == crtc->pipe)
17209                         continue;
17210
17211                 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17212                               plane->base.base.id, plane->base.name);
17213
17214                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17215                 intel_plane_disable_noatomic(plane_crtc, plane);
17216         }
17217 }
17218
17219 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17220 {
17221         struct drm_device *dev = crtc->base.dev;
17222         struct intel_encoder *encoder;
17223
17224         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
17225                 return true;
17226
17227         return false;
17228 }
17229
17230 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17231 {
17232         struct drm_device *dev = encoder->base.dev;
17233         struct intel_connector *connector;
17234
17235         for_each_connector_on_encoder(dev, &encoder->base, connector)
17236                 return connector;
17237
17238         return NULL;
17239 }
17240
17241 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17242                               enum pipe pch_transcoder)
17243 {
17244         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17245                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17246 }
17247
/*
 * The BIOS may leave a non-zero frame start delay programmed (used for
 * debugging) on the CPU and/or PCH transcoder. We always run with a
 * delay of 0, so clear it out before taking over the pipe.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+ keep the delay in CHICKEN_TRANS */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders: nothing to clear */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = I915_READ(reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	} else {
		/* older platforms keep the delay in PIPECONF */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	/* Also clear the delay on the PCH transcoder feeding this pipe. */
	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	} else {
		/* non-IBX PCH keeps it in TRANS_CHICKEN2 */
		i915_reg_t reg = TRANS_CHICKEN2(crtc->pipe);
		u32 val;

		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}
}
17297
/*
 * Bring a crtc into a state the driver can handle: clear leftover BIOS
 * frame start delays, disable all non-primary planes and any BIOS-set
 * background color, shut the pipe down if it has no encoders, and set
 * up the FIFO underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
17364
17365 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17366 {
17367         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17368
17369         /*
17370          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17371          * the hardware when a high res displays plugged in. DPLL P
17372          * divider is zero, and the pipe timings are bonkers. We'll
17373          * try to disable everything in that case.
17374          *
17375          * FIXME would be nice to be able to sanitize this state
17376          * without several WARNs, but for now let's take the easy
17377          * road.
17378          */
17379         return IS_GEN(dev_priv, 6) &&
17380                 crtc_state->hw.active &&
17381                 crtc_state->shared_dpll &&
17382                 crtc_state->port_clock == 0;
17383 }
17384
/*
 * Fix up encoder state that is inconsistent with the pipe state read
 * back from the hardware: an encoder with active connectors but no
 * active pipe gets disabled manually, and on SNB a pipe with a
 * BIOS-misprogrammed DPLL is treated as inactive so it gets torn down.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* put the connector state's real best_encoder back */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
17451
/* FIXME read out full plane state for all planes */
/*
 * Read each plane's enable state and owning pipe from the hardware,
 * record it in the plane/crtc software state, and then fix up the
 * per-crtc active plane bookkeeping.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A; /* default in case get_hw_state() doesn't write it — NOTE(review): confirm */
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	/* presumably syncs crtc state with plane visibility — see fixup_active_planes() */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
17484
/*
 * Read the current hardware modeset state (pipes, planes, shared DPLLs,
 * encoders, connectors) into the atomic software state, then derive the
 * software-only state the atomic core and our bookkeeping need: modes,
 * pixel rate, per-plane data rate and min cdclk, voltage level, and
 * bandwidth state.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Start from a pristine crtc state before reading out. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		memset(crtc_state, 0, sizeof(*crtc_state));
		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->uapi);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			dev_priv->active_pipes |= BIT(crtc->pipe);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->hw.active));
	}

	readout_plane_state(dev_priv);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);

		/*
		 * An enabled DPLL4 on EHL needs a DPLL_DC_OFF power domain
		 * reference held for as long as it is in use.
		 */
		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		/* Rebuild the DPLL's crtc usage mask from the crtc states. */
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->hw.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			/* Link the encoder to the crtc driving it. */
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive software state from what was read back above. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
				      plane->base.base.id, plane->base.name,
				      crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (WARN_ON(min_cdclk < 0))
				min_cdclk = 0;
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
17685
17686 static void
17687 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17688 {
17689         struct intel_encoder *encoder;
17690
17691         for_each_intel_encoder(&dev_priv->drm, encoder) {
17692                 struct intel_crtc_state *crtc_state;
17693
17694                 if (!encoder->get_power_domains)
17695                         continue;
17696
17697                 /*
17698                  * MST-primary and inactive encoders don't have a crtc state
17699                  * and neither of these require any power domain references.
17700                  */
17701                 if (!encoder->base.crtc)
17702                         continue;
17703
17704                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17705                 encoder->get_power_domains(encoder, crtc_state);
17706         }
17707 }
17708
/*
 * Apply display workaround register writes that must happen early,
 * before the rest of the modeset state is read out and sanitized.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
17725
17726 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17727                                        enum port port, i915_reg_t hdmi_reg)
17728 {
17729         u32 val = I915_READ(hdmi_reg);
17730
17731         if (val & SDVO_ENABLE ||
17732             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17733                 return;
17734
17735         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
17736                       port_name(port));
17737
17738         val &= ~SDVO_PIPE_SEL_MASK;
17739         val |= SDVO_PIPE_SEL(PIPE_A);
17740
17741         I915_WRITE(hdmi_reg, val);
17742 }
17743
17744 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17745                                      enum port port, i915_reg_t dp_reg)
17746 {
17747         u32 val = I915_READ(dp_reg);
17748
17749         if (val & DP_PORT_EN ||
17750             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17751                 return;
17752
17753         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
17754                       port_name(port));
17755
17756         val &= ~DP_PIPE_SEL_MASK;
17757         val |= DP_PIPE_SEL(PIPE_A);
17758
17759         I915_WRITE(dp_reg, val);
17760 }
17761
/* Sanitize the transcoder select of all IBX PCH DP and HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
17784
/*
 * Scan out the current hw modeset state and sanitize it to a state the
 * driver can handle: read everything back from the hardware, then fix
 * up TypeC ports, PCH port transcoder selects, plane mappings, encoders,
 * crtcs, unused PLLs, watermarks and power domain bookkeeping.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	/* Keep the display powered for the whole readout + sanitize pass. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is on but no longer used. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and on gmch, sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * Sanity check the power domain bookkeeping: nothing should need
	 * putting here; warn and drop any stray references.
	 */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}
17885
17886 void intel_display_resume(struct drm_device *dev)
17887 {
17888         struct drm_i915_private *dev_priv = to_i915(dev);
17889         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
17890         struct drm_modeset_acquire_ctx ctx;
17891         int ret;
17892
17893         dev_priv->modeset_restore_state = NULL;
17894         if (state)
17895                 state->acquire_ctx = &ctx;
17896
17897         drm_modeset_acquire_init(&ctx, 0);
17898
17899         while (1) {
17900                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
17901                 if (ret != -EDEADLK)
17902                         break;
17903
17904                 drm_modeset_backoff(&ctx);
17905         }
17906
17907         if (!ret)
17908                 ret = __intel_display_resume(dev, state, &ctx);
17909
17910         intel_enable_ipc(dev_priv);
17911         drm_modeset_drop_locks(&ctx);
17912         drm_modeset_acquire_fini(&ctx);
17913
17914         if (ret)
17915                 DRM_ERROR("Restoring old state failed with %i\n", ret);
17916         if (state)
17917                 drm_atomic_state_put(state);
17918 }
17919
17920 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
17921 {
17922         struct intel_connector *connector;
17923         struct drm_connector_list_iter conn_iter;
17924
17925         /* Kill all the work that may have been queued by hpd. */
17926         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
17927         for_each_intel_connector_iter(connector, &conn_iter) {
17928                 if (connector->modeset_retry_work.func)
17929                         cancel_work_sync(&connector->modeset_retry_work);
17930                 if (connector->hdcp.shim) {
17931                         cancel_delayed_work_sync(&connector->hdcp.check_work);
17932                         cancel_work_sync(&connector->hdcp.prop_work);
17933                 }
17934         }
17935         drm_connector_list_iter_end(&conn_iter);
17936 }
17937
/*
 * Tear down all modeset/display state on driver unload.
 *
 * NOTE(review): the ordering below is deliberate and fragile — several of
 * the in-line comments explain what each step must precede or follow. Do
 * not reorder without re-checking those dependencies.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain any commits still in flight on the dedicated workqueues. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* Free the deferred atomic-state frees; the list must then be empty. */
	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Workqueues were flushed above; now it is safe to destroy them. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
17982
17983 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
17984
/*
 * Snapshot of key display registers, taken at GPU error time by
 * intel_display_capture_error_state() and formatted later by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_CTL2; only captured on HSW/BDW. */
	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		/* NOTE(review): never written by the capture path — always 0. */
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false => registers below were skipped (pipe powered down). */
		bool power_domain_on;
		u32 source;
		/* PIPESTAT; only captured on GMCH platforms. */
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		/* size/pos: gen <= 3 only; addr: gen <= 7 && !HSW;
		 * surface/tile_offset: gen >= 4 only. Others stay 0. */
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* true iff this transcoder exists on the platform. */
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	/* Size must match the transcoders[] table in
	 * intel_display_capture_error_state() (BUILD_BUG_ON enforced). */
	} transcoder[5];
};
18027
/*
 * Capture the current display register state for error reporting.
 *
 * Returns a kzalloc'd snapshot (caller owns it; print it with
 * intel_display_print_error_state()), or NULL when the device has no
 * display, display is disabled, or the allocation fails.
 *
 * Registers behind a powered-down power domain are skipped; the
 * corresponding power_domain_on flags record that, so the printer
 * doesn't report stale zeros as register values.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	/* Keep the local table and the snapshot array in lockstep. */
	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return NULL;

	/* GFP_ATOMIC: error capture may run in a context that cannot sleep. */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		/* Don't touch registers of a powered-down pipe. */
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		/* Plane registers vary by generation; see struct comments. */
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		/* Transcoder not present on this platform. */
		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		/* Same rule as pipes: skip registers without power. */
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
18109
/* Shorthand for formatting into the error-state buffer below. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18111
/*
 * Format a display error-state snapshot (from
 * intel_display_capture_error_state()) into the error buffer @m.
 *
 * A NULL @error is a no-op. Registers that were skipped at capture time
 * because their power domain was off are still printed (as zero), with
 * the adjacent "Power: off" line indicating they are not valid.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Mirror the per-generation capture conditions exactly. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		/* Transcoder absent on this platform — nothing captured. */
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
18170
18171 #endif