/*
 * Source: drivers/gpu/drm/i915/display/intel_display.c (Linux kernel,
 * i915 display driver), at commit "drm/i915: Add intel_crtc_vblank_off()".
 */
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "gt/intel_rps.h"
59
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
78 #include "intel_pm.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
83 #include "intel_tc.h"
84 #include "intel_vga.h"
85
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb (adds fp16) */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv (additionally supports per-pixel alpha) */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Framebuffer modifiers for the above planes; DRM_FORMAT_MOD_INVALID terminates the list. */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats (cursor planes only take ARGB8888) */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only support linear buffers; list terminated by DRM_FORMAT_MOD_INVALID. */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
145
146 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
147                                 struct intel_crtc_state *pipe_config);
148 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
149                                    struct intel_crtc_state *pipe_config);
150
151 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
152                                   struct drm_i915_gem_object *obj,
153                                   struct drm_mode_fb_cmd2 *mode_cmd);
154 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
155 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
156 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
157                                          const struct intel_link_m_n *m_n,
158                                          const struct intel_link_m_n *m2_n2);
159 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
160 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
161 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
162 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
163 static void vlv_prepare_pll(struct intel_crtc *crtc,
164                             const struct intel_crtc_state *pipe_config);
165 static void chv_prepare_pll(struct intel_crtc *crtc,
166                             const struct intel_crtc_state *pipe_config);
167 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
168                                     struct intel_crtc_state *crtc_state);
169 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
170 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
171 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
172 static void intel_modeset_setup_hw_state(struct drm_device *dev,
173                                          struct drm_modeset_acquire_ctx *ctx);
174 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
175
/*
 * DPLL divider limits for one platform/output-type combination.
 *
 * dot and vco are frequency ranges (kHz); n, m, m1, m2, p, p1 are
 * raw divider ranges.  p2 is selected rather than searched: below
 * dot_limit use p2_slow, at/above it use p2_fast — except for LVDS,
 * where slow/fast correspond to single/dual channel mode instead
 * (see i9xx_select_p2_div()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
186
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	/* HPLL VCO rates in MHz, indexed by the SKU fuse field */
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	/*
	 * NOTE(review): assumes the masked fuse value is always <= 3;
	 * no bounds check is performed on the array index here.
	 */
	return vco_freq[hpll_freq] * 1000;
}
198
/*
 * Read a CCK clock divider register and derive the resulting clock
 * rate from @ref_freq (returned in the same units as @ref_freq).
 * @name is only used for the warning message.
 *
 * NOTE(review): callers appear to hold the CCK sideband lock around
 * this (see vlv_get_cck_clock_hpll()) — confirm before adding callers.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	/* The status field should mirror the programmed divider. */
	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	/* rate = ref_freq * 2 / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
214
/*
 * Like vlv_get_cck_clock(), but uses the HPLL VCO rate (cached
 * lazily in dev_priv->hpll_freq) as the reference clock.  Takes and
 * releases the CCK sideband lock around the register accesses.
 */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	/* Cache the HPLL VCO rate on first use. */
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}
231
232 static void intel_update_czclk(struct drm_i915_private *dev_priv)
233 {
234         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
235                 return;
236
237         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
238                                                       CCK_CZ_CLOCK_CONTROL);
239
240         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
241 }
242
243 static inline u32 /* units of 100MHz */
244 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
245                     const struct intel_crtc_state *pipe_config)
246 {
247         if (HAS_DDI(dev_priv))
248                 return pipe_config->port_clock; /* SPLL */
249         else
250                 return dev_priv->fdi_pll_freq;
251 }
252
/*
 * gen2 (i8xx) DPLL limits for DAC (VGA) output.  DVO differs only in
 * p2_fast; LVDS uses its own p1 range and channel-based p2 values.
 */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 DVO: as the DAC limits, but p2 is 4 on both sides of dot_limit. */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 LVDS: p2 14 for single channel, 7 for dual channel. */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
291
/* i9xx DPLL limits for SDVO/DAC output. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx DPLL limits for LVDS output (p2 14 single / 7 dual channel). */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
317
318
/* g4x DPLL limits for SDVO output. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* g4x DPLL limits for HDMI/DAC output. */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* g4x single-channel LVDS limits (dot_limit 0: p2 is always 14). */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* g4x dual-channel LVDS limits (dot_limit 0: p2 is always 7). */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
374
/* Pineview DPLL limits for SDVO output. */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL limits for LVDS output (m1 unused, same as above). */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
402
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ilk/snb single-channel LVDS limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ilk/snb dual-channel LVDS limits. */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ilk/snb dual-channel LVDS limits with a 100MHz refclk. */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
473
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits on chv (hence the << 22) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

/* bxt/glk limits; same fractional m2 representation as chv. */
static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
517
518 /* WA Display #0827: Gen9:all */
519 static void
520 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
521 {
522         if (enable)
523                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
524                            I915_READ(CLKGATE_DIS_PSL(pipe)) |
525                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
526         else
527                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
528                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
529                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
530 }
531
532 /* Wa_2006604312:icl */
533 static void
534 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
535                        bool enable)
536 {
537         if (enable)
538                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
539                            I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
540         else
541                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
542                            I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
543 }
544
545 static bool
546 needs_modeset(const struct intel_crtc_state *state)
547 {
548         return drm_atomic_crtc_needs_modeset(&state->uapi);
549 }
550
551 bool
552 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
553 {
554         return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
555                 crtc_state->sync_mode_slaves_mask);
556 }
557
558 static bool
559 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
560 {
561         return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
562                 crtc_state->sync_mode_slaves_mask);
563 }
564
565 /*
566  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
567  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
568  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
569  * The helpers' return value is the rate of the clock that is fed to the
570  * display engine's pipe which can be the above fast dot clock rate or a
571  * divided-down version of it.
572  */
573 /* m1 is reserved as 0 in Pineview, n is a ring counter */
574 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
575 {
576         clock->m = clock->m2 + 2;
577         clock->p = clock->p1 * clock->p2;
578         if (WARN_ON(clock->n == 0 || clock->p == 0))
579                 return 0;
580         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
581         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
582
583         return clock->dot;
584 }
585
586 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
587 {
588         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
589 }
590
591 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
592 {
593         clock->m = i9xx_dpll_compute_m(clock);
594         clock->p = clock->p1 * clock->p2;
595         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
596                 return 0;
597         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
598         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
599
600         return clock->dot;
601 }
602
603 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
604 {
605         clock->m = clock->m1 * clock->m2;
606         clock->p = clock->p1 * clock->p2;
607         if (WARN_ON(clock->n == 0 || clock->p == 0))
608                 return 0;
609         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
610         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
611
612         return clock->dot / 5;
613 }
614
615 int chv_calc_dpll_params(int refclk, struct dpll *clock)
616 {
617         clock->m = clock->m1 * clock->m2;
618         clock->p = clock->p1 * clock->p2;
619         if (WARN_ON(clock->n == 0 || clock->p == 0))
620                 return 0;
621         clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
622                                            clock->n << 22);
623         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
624
625         return clock->dot / 5;
626 }
627
/*
 * Bail out of the enclosing validity check: expands to "return false"
 * (the debug message is currently compiled out).  Only usable inside a
 * function returning bool, i.e. intel_PLL_is_valid().
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
629
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 *
 * Note: INTELPllInvalid() hides a "return false", so every failed check
 * exits this function immediately.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is required except on platforms with a single m divider. */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* vlv/chv/bxt limit tables don't define combined m and p ranges. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
670
671 static int
672 i9xx_select_p2_div(const struct intel_limit *limit,
673                    const struct intel_crtc_state *crtc_state,
674                    int target)
675 {
676         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
677
678         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
679                 /*
680                  * For LVDS just rely on its current settings for dual-channel.
681                  * We haven't figured out how to reliably set up different
682                  * single/dual channel state, if we even can.
683                  */
684                 if (intel_is_dual_link_lvds(dev_priv))
685                         return limit->p2.p2_fast;
686                 else
687                         return limit->p2.p2_slow;
688         } else {
689                 if (target < limit->p2.dot_limit)
690                         return limit->p2.p2_slow;
691                 else
692                         return limit->p2.p2_fast;
693         }
694 }
695
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Exhaustive search of the divider space.  Only a strictly
	 * smaller dot-clock error replaces the best candidate, so ties
	 * are won by the earliest combination in iteration order.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must exceed m2 (see intel_PLL_is_valid()) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one valid candidate improved on the initial error */
	return (err != target);
}
753
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive search as i9xx_find_best_dpll(), but without
	 * the m1 > m2 restriction (Pineview's m1 range is 0..0) and
	 * using the Pineview clock equation.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one valid candidate improved on the initial error */
	return (err != target);
}
809
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * NOTE(review): unlike the i9xx/pnv variants, @match_clock is accepted
 * for signature parity but never consulted in this function.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->uapi.crtc->dev;
        struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                /* cap n so later iterations can't pick a larger n */
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}
868
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far.  Returns true if the candidate
 * should replace the current best; the candidate's error (in ppm of the
 * target frequency) is written to *error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
                               const struct dpll *calculated_clock,
                               const struct dpll *best_clock,
                               unsigned int best_error_ppm,
                               unsigned int *error_ppm)
{
        /*
         * For CHV ignore the error and consider only the P value.
         * Prefer a bigger P value based on HW requirements.
         */
        if (IS_CHERRYVIEW(to_i915(dev))) {
                *error_ppm = 0;

                return calculated_clock->p > best_clock->p;
        }

        if (WARN_ON_ONCE(!target_freq))
                return false;

        *error_ppm = div_u64(1000000ULL *
                                abs(target_freq - calculated_clock->dot),
                             target_freq);
        /*
         * Prefer a better P value over a better (smaller) error if the error
         * is small. Ensure this preference for future configurations too by
         * setting the error to 0.
         */
        if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
                *error_ppm = 0;

                return true;
        }

        /* otherwise require a meaningfully (>10 ppm) smaller error */
        return *error_ppm + 10 < best_error_ppm;
}
908
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * NOTE(review): @match_clock is accepted for signature parity with the
 * other find_best_dpll helpers but is not used here.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
        bool found = false;

        target *= 5; /* fast clock */

        memset(best_clock, 0, sizeof(*best_clock));

        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                        for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
                             clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                                clock.p = clock.p1 * clock.p2;
                                /* based on hardware requirement, prefer bigger m1,m2 values */
                                for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
                                        unsigned int ppm;

                                        /* derive m2 from the remaining fixed divisors */
                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
                                                                     refclk * clock.m1);

                                        vlv_calc_dpll_params(refclk, &clock);

                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        /* tie-breaking/error policy lives in vlv_PLL_is_optimal() */
                                        if (!vlv_PLL_is_optimal(dev, target,
                                                                &clock,
                                                                best_clock,
                                                                bestppm, &ppm))
                                                continue;

                                        *best_clock = clock;
                                        bestppm = ppm;
                                        found = true;
                                }
                        }
                }
        }

        return found;
}
968
969 /*
970  * Returns a set of divisors for the desired target clock with the given
971  * refclk, or FALSE.  The returned values represent the clock equation:
972  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
973  */
974 static bool
975 chv_find_best_dpll(const struct intel_limit *limit,
976                    struct intel_crtc_state *crtc_state,
977                    int target, int refclk, struct dpll *match_clock,
978                    struct dpll *best_clock)
979 {
980         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
981         struct drm_device *dev = crtc->base.dev;
982         unsigned int best_error_ppm;
983         struct dpll clock;
984         u64 m2;
985         int found = false;
986
987         memset(best_clock, 0, sizeof(*best_clock));
988         best_error_ppm = 1000000;
989
990         /*
991          * Based on hardware doc, the n always set to 1, and m1 always
992          * set to 2.  If requires to support 200Mhz refclk, we need to
993          * revisit this because n may not 1 anymore.
994          */
995         clock.n = 1, clock.m1 = 2;
996         target *= 5;    /* fast clock */
997
998         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
999                 for (clock.p2 = limit->p2.p2_fast;
1000                                 clock.p2 >= limit->p2.p2_slow;
1001                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1002                         unsigned int error_ppm;
1003
1004                         clock.p = clock.p1 * clock.p2;
1005
1006                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1007                                                    refclk * clock.m1);
1008
1009                         if (m2 > INT_MAX/clock.m1)
1010                                 continue;
1011
1012                         clock.m2 = m2;
1013
1014                         chv_calc_dpll_params(refclk, &clock);
1015
1016                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1017                                 continue;
1018
1019                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1020                                                 best_error_ppm, &error_ppm))
1021                                 continue;
1022
1023                         *best_clock = clock;
1024                         best_error_ppm = error_ppm;
1025                         found = true;
1026                 }
1027         }
1028
1029         return found;
1030 }
1031
1032 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1033                         struct dpll *best_clock)
1034 {
1035         int refclk = 100000;
1036         const struct intel_limit *limit = &intel_limits_bxt;
1037
1038         return chv_find_best_dpll(limit, crtc_state,
1039                                   crtc_state->port_clock, refclk,
1040                                   NULL, best_clock);
1041 }
1042
/* Report whether @crtc is fully active (enabled with a primary fb and a clock). */
bool intel_crtc_active(struct intel_crtc *crtc)
{
        /* Be paranoid as we can arrive here with only partial
         * state retrieved from the hardware during setup.
         *
         * We can ditch the adjusted_mode.crtc_clock check as soon
         * as Haswell has gained clock readout/fastboot support.
         *
         * We can ditch the crtc->primary->state->fb check as soon as we can
         * properly reconstruct framebuffers.
         *
         * FIXME: The intel_crtc->active here should be switched to
         * crtc->state->active once we have proper CRTC states wired up
         * for atomic.
         */
        return crtc->active && crtc->base.primary->state->fb &&
                crtc->config->hw.adjusted_mode.crtc_clock;
}
1061
1062 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1063                                              enum pipe pipe)
1064 {
1065         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1066
1067         return crtc->config->cpu_transcoder;
1068 }
1069
1070 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1071                                     enum pipe pipe)
1072 {
1073         i915_reg_t reg = PIPEDSL(pipe);
1074         u32 line1, line2;
1075         u32 line_mask;
1076
1077         if (IS_GEN(dev_priv, 2))
1078                 line_mask = DSL_LINEMASK_GEN2;
1079         else
1080                 line_mask = DSL_LINEMASK_GEN3;
1081
1082         line1 = I915_READ(reg) & line_mask;
1083         msleep(5);
1084         line2 = I915_READ(reg) & line_mask;
1085
1086         return line1 != line2;
1087 }
1088
/*
 * Wait (up to 100 ms) until the pipe's scanline movement matches @state:
 * true = scanline moving (pipe running), false = settled (pipe stopped).
 * Logs an error on timeout instead of failing.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Wait for the display line to settle/start moving */
        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
                DRM_ERROR("pipe %c scanline %s wait timed out\n",
                          pipe_name(pipe), onoff(state));
}
1099
/* Wait for the pipe's scanline to stop moving (pipe fully off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, false);
}
1104
/* Wait for the pipe's scanline to start moving (pipe running). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, true);
}
1109
/*
 * Wait for the pipe described by @old_crtc_state to fully turn off.
 * Gen4+ exposes a PIPECONF state bit to poll; older gens are detected
 * by watching the scanline stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (INTEL_GEN(dev_priv) >= 4) {
                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_de_wait_for_clear(dev_priv, reg,
                                            I965_PIPECONF_ACTIVE, 100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                intel_wait_for_pipe_scanline_stopped(crtc);
        }
}
1128
1129 /* Only for pre-ILK configs */
1130 void assert_pll(struct drm_i915_private *dev_priv,
1131                 enum pipe pipe, bool state)
1132 {
1133         u32 val;
1134         bool cur_state;
1135
1136         val = I915_READ(DPLL(pipe));
1137         cur_state = !!(val & DPLL_VCO_ENABLE);
1138         I915_STATE_WARN(cur_state != state,
1139              "PLL state assertion failure (expected %s, current %s)\n",
1140                         onoff(state), onoff(cur_state));
1141 }
1142
/*
 * Assert that the DSI PLL is in the expected @state, reading its control
 * register over the CCK sideband.
 * XXX: the dsi pll is shared between MIPI DSI ports
 */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        /* sideband access must be bracketed by get/put */
        vlv_cck_get(dev_priv);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        vlv_cck_put(dev_priv);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1158
/*
 * Assert that the FDI TX side for @pipe is in the expected @state.
 * DDI platforms have no dedicated FDI_TX register, so the transcoder's
 * DDI function-control enable bit is checked there instead.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1180
1181 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1182                           enum pipe pipe, bool state)
1183 {
1184         u32 val;
1185         bool cur_state;
1186
1187         val = I915_READ(FDI_RX_CTL(pipe));
1188         cur_state = !!(val & FDI_RX_ENABLE);
1189         I915_STATE_WARN(cur_state != state,
1190              "FDI RX state assertion failure (expected %s, current %s)\n",
1191                         onoff(state), onoff(cur_state));
1192 }
1193 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1194 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1195
/*
 * Assert that the FDI TX PLL for @pipe is enabled.  Skipped on platforms
 * where the check does not apply (see the early returns).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (IS_GEN(dev_priv, 5))
                return;

        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
        if (HAS_DDI(dev_priv))
                return;

        val = I915_READ(FDI_TX_CTL(pipe));
        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1212
/* Assert that the FDI RX PLL for @pipe is in the expected @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
                       enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(FDI_RX_CTL(pipe));
        cur_state = !!(val & FDI_RX_PLL_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "FDI RX PLL assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1225
/*
 * Assert that the panel power sequencer registers driving @pipe are
 * unlocked (or that the panel is off).  The PPS register and the pipe the
 * panel is attached to are located per-platform: PCH-split parts select
 * the port via PP_ON_DELAYS, VLV/CHV have per-pipe PPS, and older parts
 * are assumed to use LVDS.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;

        /* DDI platforms are not handled here */
        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* map the PPS port selection to the pipe driving that port */
                switch (port_sel) {
                case PANEL_PORT_SELECT_LVDS:
                        intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPA:
                        intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPC:
                        intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPD:
                        intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
                        break;
                default:
                        MISSING_CASE(port_sel);
                        break;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* pre-PCH-split parts only support LVDS panels here */
                WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
                intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }

        /* panel off, or the unlock pattern present, counts as unlocked */
        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
1282
/*
 * Assert that @pipe is in the expected @state.  A transcoder whose power
 * domain is off is treated as disabled (the register is unreadable then).
 */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wakeref) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain, wakeref);
        } else {
                /* power domain off -> pipe cannot be enabled */
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
1311
/*
 * Assert that @plane's hardware enable state matches the expected @state,
 * using the plane's own get_hw_state() readout hook.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
        enum pipe pipe;
        bool cur_state;

        cur_state = plane->get_hw_state(plane, &pipe);

        I915_STATE_WARN(cur_state != state,
                        "%s assertion failure (expected %s, current %s)\n",
                        plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1326
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_plane *plane;

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
                assert_plane_disabled(plane);
}
1335
/*
 * Assert that vblank interrupts are disabled on @crtc.
 * drm_crtc_vblank_get() returns 0 when it could enable/reference vblanks,
 * i.e. they were (or could be made) active -- which is the failure case
 * here; the put undoes the reference taken by the successful get.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1341
1342 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1343                                     enum pipe pipe)
1344 {
1345         u32 val;
1346         bool enabled;
1347
1348         val = I915_READ(PCH_TRANSCONF(pipe));
1349         enabled = !!(val & TRANS_ENABLE);
1350         I915_STATE_WARN(enabled,
1351              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1352              pipe_name(pipe));
1353 }
1354
/*
 * Assert that the PCH DP @port is not driven by @pipe's transcoder, and
 * (on IBX) that a disabled port is not left parked on transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, enum port port,
                                   i915_reg_t dp_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH DP %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH DP %c still using transcoder B\n",
                        port_name(port));
}
1372
/*
 * Assert that the PCH HDMI @port is not driven by @pipe's transcoder, and
 * (on IBX) that a disabled port is not left parked on transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum port port,
                                     i915_reg_t hdmi_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH HDMI %c still using transcoder B\n",
                        port_name(port));
}
1390
/*
 * Assert that no PCH port (DP, VGA, LVDS, HDMI/SDVO) is still driven by
 * @pipe's transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        enum pipe port_pipe;

        assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

        I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH VGA enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        /* PCH SDVOB multiplex with HDMIB */
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1415
/*
 * Write the precomputed DPLL value for @crtc's pipe and wait for the PLL
 * to report lock.  Callers (vlv_enable_pll) have already verified the
 * pipe is disabled and the PPS is unlocked.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        POSTING_READ(DPLL(pipe));
        udelay(150);

        if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1429
/*
 * Enable the VLV DPLL for @crtc from the precomputed dpll_hw_state.
 * The VCO is only powered up when the state actually enables it; DPLL_MD
 * is written unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1447
1448
/*
 * Power up the CHV DPLL for @crtc's pipe: first re-enable the 10-bit
 * clock via DPIO sideband, then enable the PLL and wait for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        vlv_dpio_get(dev_priv);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        vlv_dpio_put(dev_priv);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1478
/*
 * Enable the CHV DPLL for @crtc.  Pipes B/C need the WaPixelRepeatMode
 * chicken-bit dance because their DPLL_MD register is not writable
 * directly; pipe A takes the straightforward path.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _chv_enable_pll(crtc, pipe_config);

        if (pipe != PIPE_A) {
                /*
                 * WaPixelRepeatModeFixForC0:chv
                 *
                 * DPLLCMD is AWOL. Use chicken bits to propagate
                 * the value from DPLLBMD to either pipe B or C.
                 */
                I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
                I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
                I915_WRITE(CBR4_VLV, 0);
                /* remember the value so HW readout can compare against it */
                dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

                /*
                 * DPLLB VGA mode also seems to cause problems.
                 * We should always have it disabled.
                 */
                WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
        } else {
                I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
                POSTING_READ(DPLL_MD(pipe));
        }
}
1515
1516 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1517 {
1518         if (IS_I830(dev_priv))
1519                 return false;
1520
1521         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1522 }
1523
/*
 * Enable the i9xx-style DPLL for @crtc using the precomputed
 * dpll_hw_state.  The write sequence (VGA-mode write first, repeated
 * final writes) follows hardware requirements -- do not reorder.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc_state->dpll_hw_state.dpll;
        int i;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (i9xx_has_pps(dev_priv))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc_state->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        for (i = 0; i < 3; i++) {
                I915_WRITE(reg, dpll);
                POSTING_READ(reg);
                udelay(150); /* wait for warmup */
        }
}
1569
/*
 * Disable the DPLL for a gmch pipe, leaving only DPLL_VGA_MODE_DIS
 * set in the register. i830 is skipped entirely because its pipes
 * and PLLs must be kept running.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1586
/*
 * Disable the VLV DPLL for @pipe. The register is left with the
 * integrated reference clock enabled and VGA mode disabled; pipes
 * other than A additionally keep the integrated CRI clock running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1602
/*
 * Disable the CHV DPLL for @pipe. The SSC reference clock stays
 * enabled (plus the integrated CRI clock for pipes other than A),
 * and the 10 bit clock to the display controller is gated off via
 * the DPIO sideband afterwards.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1628
/*
 * Wait for the PHY to report @dport's lanes as ready, matching
 * @expected_mask against the per-port ready bits. Ports B and C
 * report through DPLL(0) status bits (the port C field sits 4 bits
 * higher, hence the shift), port D through DPIO_PHY_STATUS. A
 * timeout only triggers a WARN, it is not treated as fatal.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1660
/*
 * Enable the PCH transcoder fed from @crtc's pipe over FDI. The
 * shared DPLL and both FDI directions must already be running. The
 * transcoder's BPC (IBX only) and interlace mode are mirrored from
 * the pipe's PIPECONF, and on CPT the timing-override workaround
 * bit is set before enabling.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			/* IBX + SDVO needs the legacy interlace mode */
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1726
/*
 * Enable the (single) LPT PCH transcoder, which is always fed from
 * FDI on pipe A. The interlace mode is copied from the CPU
 * transcoder's PIPECONF, and the timing-override workaround bit is
 * set before enabling.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1758
/*
 * Disable the PCH transcoder for @pipe and wait for it to stop.
 * FDI and the PCH ports must already be off (asserted below). On
 * CPT the timing-override chicken bit set at enable time is cleared
 * again afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1788
/*
 * Disable the single LPT PCH transcoder, wait for it to stop, then
 * clear the timing-override chicken bit that was set at enable time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1806
1807 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1808 {
1809         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1810
1811         if (HAS_PCH_LPT(dev_priv))
1812                 return PIPE_A;
1813         else
1814                 return crtc->pipe;
1815 }
1816
1817 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1818 {
1819         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1820
1821         /*
1822          * On i965gm the hardware frame counter reads
1823          * zero when the TV encoder is enabled :(
1824          */
1825         if (IS_I965GM(dev_priv) &&
1826             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1827                 return 0;
1828
1829         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1830                 return 0xffffffff; /* full 32 bit counter */
1831         else if (INTEL_GEN(dev_priv) >= 3)
1832                 return 0xffffff; /* only 24 bits of frame count */
1833         else
1834                 return 0; /* Gen2 doesn't have a hardware frame counter */
1835 }
1836
/*
 * (Re)arm vblank handling for the crtc: program the max hw frame
 * counter value appropriate for the new state (0 makes drm fall back
 * to timestamp-based counting) and enable vblank processing.
 */
static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
1845
/* Disable vblank handling for the crtc and assert it really went off. */
static void intel_crtc_vblank_off(struct intel_crtc *crtc)
{
	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
1851
/*
 * Enable the pipe/transcoder for the new crtc state. The planes must
 * be off, and whatever clock feeds the pipe (DSI PLL or gmch DPLL on
 * GMCH platforms, FDI PLLs for PCH encoders) must already be running.
 * When the hw frame counter is unusable we additionally wait for the
 * scanline counter to start moving before vblanks are re-enabled.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1909
/*
 * Disable the pipe/transcoder for the old crtc state. Planes must be
 * off first (they would scan out of a dead pipe). Double-wide mode
 * is always dropped; the pipe itself is only disabled on platforms
 * other than i830, whose pipes must be kept running.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* only wait for the pipe to stop if we actually disabled it */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1949
/* GTT page/tile size in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1954
/*
 * Return the width in bytes of one tile row for @fb's plane
 * @color_plane. Linear surfaces report a full GTT page as their
 * "tile" width; the CCS AUX plane (color_plane == 1) always uses a
 * 128 byte tile width, while the main surface width depends on the
 * modifier, the platform and (for Yf) bytes per pixel.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width scales with the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
2002
2003 static unsigned int
2004 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2005 {
2006         return intel_tile_size(to_i915(fb->dev)) /
2007                 intel_tile_width_bytes(fb, color_plane);
2008 }
2009
2010 /* Return the tile dimensions in pixel units */
2011 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2012                             unsigned int *tile_width,
2013                             unsigned int *tile_height)
2014 {
2015         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2016         unsigned int cpp = fb->format->cpp[color_plane];
2017
2018         *tile_width = tile_width_bytes / cpp;
2019         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
2020 }
2021
/* Round @height up to a whole number of tile rows for this fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2030
2031 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2032 {
2033         unsigned int size = 0;
2034         int i;
2035
2036         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2037                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2038
2039         return size;
2040 }
2041
2042 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2043 {
2044         unsigned int size = 0;
2045         int i;
2046
2047         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2048                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2049
2050         return size;
2051 }
2052
2053 static void
2054 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2055                         const struct drm_framebuffer *fb,
2056                         unsigned int rotation)
2057 {
2058         view->type = I915_GGTT_VIEW_NORMAL;
2059         if (drm_rotation_90_or_270(rotation)) {
2060                 view->type = I915_GGTT_VIEW_ROTATED;
2061                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2062         }
2063 }
2064
/* Per-platform GGTT alignment (bytes) required for the cursor surface. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2076
/* Per-platform GGTT alignment (bytes) for linear scanout surfaces. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2089
/*
 * Minimum GGTT alignment in bytes required to scan out plane
 * @color_plane of @fb. The CCS AUX surface only needs 4K; main
 * surface requirements depend on the tiling modifier and platform.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (color_plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2116
2117 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2118 {
2119         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2120         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2121
2122         return INTEL_GEN(dev_priv) < 4 ||
2123                 (plane->has_fbc &&
2124                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2125 }
2126
/*
 * Pin a framebuffer's backing object into the GGTT for scanout,
 * optionally installing a fence for tiled scanout.
 *
 * On success returns a new reference to the pinned vma (released
 * via intel_unpin_fb_vma()) and sets PLANE_HAS_FENCE in *out_flags
 * if a fence was installed; on failure returns an ERR_PTR.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* alignment is driven by the main surface (color plane 0) */
	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	i915_gem_object_lock(obj);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 must have the fence: undo the pin and bail */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* success path only: take the reference we hand to the caller */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	i915_gem_object_unlock(obj);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2221
/*
 * Release a scanout vma obtained from intel_pin_and_fence_fb_obj():
 * drop its fence if one was installed (PLANE_HAS_FENCE in @flags),
 * unpin it from the display plane, and put the pin-time reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2232
2233 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2234                           unsigned int rotation)
2235 {
2236         if (drm_rotation_90_or_270(rotation))
2237                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2238         else
2239                 return fb->pitches[color_plane];
2240 }
2241
2242 /*
2243  * Convert the x/y offsets into a linear offset.
2244  * Only valid with 0/180 degree rotation, which is fine since linear
2245  * offset is only used with linear buffers on pre-hsw and tiled buffers
2246  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2247  */
2248 u32 intel_fb_xy_to_linear(int x, int y,
2249                           const struct intel_plane_state *state,
2250                           int color_plane)
2251 {
2252         const struct drm_framebuffer *fb = state->hw.fb;
2253         unsigned int cpp = fb->format->cpp[color_plane];
2254         unsigned int pitch = state->color_plane[color_plane].stride;
2255
2256         return y * pitch + x * cpp;
2257 }
2258
2259 /*
2260  * Add the x/y offsets derived from fb->offsets[] to the user
2261  * specified plane src x/y offsets. The resulting x/y offsets
2262  * specify the start of scanout from the beginning of the gtt mapping.
2263  */
2264 void intel_add_fb_offsets(int *x, int *y,
2265                           const struct intel_plane_state *state,
2266                           int color_plane)
2267
2268 {
2269         *x += state->color_plane[color_plane].x;
2270         *y += state->color_plane[color_plane].y;
2271 }
2272
2273 static u32 intel_adjust_tile_offset(int *x, int *y,
2274                                     unsigned int tile_width,
2275                                     unsigned int tile_height,
2276                                     unsigned int tile_size,
2277                                     unsigned int pitch_tiles,
2278                                     u32 old_offset,
2279                                     u32 new_offset)
2280 {
2281         unsigned int pitch_pixels = pitch_tiles * tile_width;
2282         unsigned int tiles;
2283
2284         WARN_ON(old_offset & (tile_size - 1));
2285         WARN_ON(new_offset & (tile_size - 1));
2286         WARN_ON(new_offset > old_offset);
2287
2288         tiles = (old_offset - new_offset) / tile_size;
2289
2290         *y += tiles / pitch_tiles * tile_height;
2291         *x += tiles % pitch_tiles * tile_width;
2292
2293         /* minimize x in case it got needlessly big */
2294         *y += *x / pitch_pixels * tile_height;
2295         *x %= pitch_pixels;
2296
2297         return new_offset;
2298 }
2299
2300 static bool is_surface_linear(u64 modifier, int color_plane)
2301 {
2302         return modifier == DRM_FORMAT_MOD_LINEAR;
2303 }
2304
/*
 * Rebase an aligned surface offset from @old_offset to @new_offset,
 * converting the byte difference into x/y coordinate adjustments so
 * the same pixel is still addressed. Linear surfaces use plain
 * pitch/cpp arithmetic; tiled surfaces delegate to
 * intel_adjust_tile_offset() working in whole tiles. @pitch is in
 * bytes for 0/180 rotation and in tile rows for 90/270.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile rows, tiles transpose */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2343
2344 /*
2345  * Adjust the tile offset by moving the difference into
2346  * the x/y offsets.
2347  */
2348 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2349                                              const struct intel_plane_state *state,
2350                                              int color_plane,
2351                                              u32 old_offset, u32 new_offset)
2352 {
2353         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2354                                            state->hw.rotation,
2355                                            state->color_plane[color_plane].stride,
2356                                            old_offset, new_offset);
2357 }
2358
2359 /*
2360  * Computes the aligned offset to the base tile and adjusts
2361  * x, y. bytes per pixel is assumed to be a power-of-two.
2362  *
2363  * In the 90/270 rotated case, x and y are assumed
2364  * to be already rotated to match the rotated GTT view, and
2365  * pitch is the tile_height aligned framebuffer height.
2366  *
2367  * This function is used when computing the derived information
2368  * under intel_framebuffer, so using any of that information
2369  * here is not allowed. Anything under drm_framebuffer can be
2370  * used. This is why the user has to pass in the pitch since it
2371  * is specified in the rotated orientation.
2372  */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
                                        int *x, int *y,
                                        const struct drm_framebuffer *fb,
                                        int color_plane,
                                        unsigned int pitch,
                                        unsigned int rotation,
                                        u32 alignment)
{
        unsigned int cpp = fb->format->cpp[color_plane];
        u32 offset, offset_aligned;

        /*
         * Turn @alignment into a bitmask for the & ~alignment arithmetic
         * below; this assumes a power-of-two (or zero) alignment.
         */
        if (alignment)
                alignment--;

        if (!is_surface_linear(fb->modifier, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int tile_rows, tiles, pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        /* for 90/270 @pitch is the tile_height aligned fb height */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                /* Split x/y into whole tiles plus an intra-tile remainder. */
                tile_rows = *y / tile_height;
                *y %= tile_height;

                tiles = *x / tile_width;
                *x %= tile_width;

                /* Byte offset of the containing tile, then align it down. */
                offset = (tile_rows * pitch_tiles + tiles) * tile_size;
                offset_aligned = offset & ~alignment;

                /* Fold the offset -> aligned-base delta back into x/y. */
                intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         offset, offset_aligned);
        } else {
                offset = *y * pitch + *x * cpp;
                offset_aligned = offset & ~alignment;

                /* Remainder below the aligned base goes back into x/y. */
                *y = (offset & alignment) / pitch;
                *x = ((offset & alignment) - *y * pitch) / cpp;
        }

        return offset_aligned;
}
2423
2424 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2425                                               const struct intel_plane_state *state,
2426                                               int color_plane)
2427 {
2428         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2429         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2430         const struct drm_framebuffer *fb = state->hw.fb;
2431         unsigned int rotation = state->hw.rotation;
2432         int pitch = state->color_plane[color_plane].stride;
2433         u32 alignment;
2434
2435         if (intel_plane->id == PLANE_CURSOR)
2436                 alignment = intel_cursor_alignment(dev_priv);
2437         else
2438                 alignment = intel_surf_alignment(fb, color_plane);
2439
2440         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2441                                             pitch, rotation, alignment);
2442 }
2443
2444 /* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
                                 const struct drm_framebuffer *fb,
                                 int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int height;

        /* Tiled surfaces require a tile size aligned byte offset. */
        if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
            fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
                DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
                              fb->offsets[color_plane], color_plane);
                return -EINVAL;
        }

        height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
        height = ALIGN(height, intel_tile_height(fb, color_plane));

        /* Catch potential overflows early */
        if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
                            fb->offsets[color_plane])) {
                DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
                              fb->offsets[color_plane], fb->pitches[color_plane],
                              color_plane);
                return -ERANGE;
        }

        *x = 0;
        *y = 0;

        /*
         * Fold fb->offsets[color_plane] into x/y, i.e. express the plane
         * start as pixel coordinates relative to byte offset 0.
         */
        intel_adjust_aligned_offset(x, y,
                                    fb, color_plane, DRM_MODE_ROTATE_0,
                                    fb->pitches[color_plane],
                                    fb->offsets[color_plane], 0);

        return 0;
}
2481
2482 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2483 {
2484         switch (fb_modifier) {
2485         case I915_FORMAT_MOD_X_TILED:
2486                 return I915_TILING_X;
2487         case I915_FORMAT_MOD_Y_TILED:
2488         case I915_FORMAT_MOD_Y_TILED_CCS:
2489                 return I915_TILING_Y;
2490         default:
2491                 return I915_TILING_NONE;
2492         }
2493 }
2494
2495 /*
2496  * From the Sky Lake PRM:
2497  * "The Color Control Surface (CCS) contains the compression status of
2498  *  the cache-line pairs. The compression state of the cache-line pair
2499  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2500  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2501  *  cache-line-pairs. CCS is always Y tiled."
2502  *
2503  * Since cache line pairs refers to horizontally adjacent cache lines,
2504  * each cache line in the CCS corresponds to an area of 32x16 cache
2505  * lines on the main surface. Since each pixel is 4 bytes, this gives
2506  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2507  * main surface.
2508  */
/*
 * Format descriptions for the CCS modifiers: plane 0 is the 32bpp main
 * surface, plane 1 is the CCS where one byte covers an 8x16 pixel area
 * of the main surface (hence cpp = 1, hsub = 8, vsub = 16 — see the
 * ratio derivation in the comment above).
 */
static const struct drm_format_info ccs_formats[] = {
        { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
        { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2519
2520 static const struct drm_format_info *
2521 lookup_format_info(const struct drm_format_info formats[],
2522                    int num_formats, u32 format)
2523 {
2524         int i;
2525
2526         for (i = 0; i < num_formats; i++) {
2527                 if (formats[i].format == format)
2528                         return &formats[i];
2529         }
2530
2531         return NULL;
2532 }
2533
2534 static const struct drm_format_info *
2535 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2536 {
2537         switch (cmd->modifier[0]) {
2538         case I915_FORMAT_MOD_Y_TILED_CCS:
2539         case I915_FORMAT_MOD_Yf_TILED_CCS:
2540                 return lookup_format_info(ccs_formats,
2541                                           ARRAY_SIZE(ccs_formats),
2542                                           cmd->pixel_format);
2543         default:
2544                 return NULL;
2545         }
2546 }
2547
2548 bool is_ccs_modifier(u64 modifier)
2549 {
2550         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2551                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2552 }
2553
2554 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2555                               u32 pixel_format, u64 modifier)
2556 {
2557         struct intel_crtc *crtc;
2558         struct intel_plane *plane;
2559
2560         /*
2561          * We assume the primary plane for pipe A has
2562          * the highest stride limits of them all.
2563          */
2564         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2565         if (!crtc)
2566                 return 0;
2567
2568         plane = to_intel_plane(crtc->base.primary);
2569
2570         return plane->max_stride(plane, pixel_format, modifier,
2571                                  DRM_MODE_ROTATE_0);
2572 }
2573
2574 static
2575 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2576                         u32 pixel_format, u64 modifier)
2577 {
2578         /*
2579          * Arbitrary limit for gen4+ chosen to match the
2580          * render engine max stride.
2581          *
2582          * The new CCS hash mode makes remapping impossible
2583          */
2584         if (!is_ccs_modifier(modifier)) {
2585                 if (INTEL_GEN(dev_priv) >= 7)
2586                         return 256*1024;
2587                 else if (INTEL_GEN(dev_priv) >= 4)
2588                         return 128*1024;
2589         }
2590
2591         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2592 }
2593
2594 static u32
2595 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2596 {
2597         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2598
2599         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2600                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2601                                                            fb->format->format,
2602                                                            fb->modifier);
2603
2604                 /*
2605                  * To make remapping with linear generally feasible
2606                  * we need the stride to be page aligned.
2607                  */
2608                 if (fb->pitches[color_plane] > max_stride)
2609                         return intel_tile_size(dev_priv);
2610                 else
2611                         return 64;
2612         } else {
2613                 return intel_tile_width_bytes(fb, color_plane);
2614         }
2615 }
2616
2617 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2618 {
2619         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2620         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2621         const struct drm_framebuffer *fb = plane_state->hw.fb;
2622         int i;
2623
2624         /* We don't want to deal with remapping with cursors */
2625         if (plane->id == PLANE_CURSOR)
2626                 return false;
2627
2628         /*
2629          * The display engine limits already match/exceed the
2630          * render engine limits, so not much point in remapping.
2631          * Would also need to deal with the fence POT alignment
2632          * and gen2 2KiB GTT tile size.
2633          */
2634         if (INTEL_GEN(dev_priv) < 4)
2635                 return false;
2636
2637         /*
2638          * The new CCS hash mode isn't compatible with remapping as
2639          * the virtual address of the pages affects the compressed data.
2640          */
2641         if (is_ccs_modifier(fb->modifier))
2642                 return false;
2643
2644         /* Linear needs a page aligned stride for remapping */
2645         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2646                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2647
2648                 for (i = 0; i < fb->format->num_planes; i++) {
2649                         if (fb->pitches[i] & alignment)
2650                                 return false;
2651                 }
2652         }
2653
2654         return true;
2655 }
2656
2657 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2658 {
2659         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2660         const struct drm_framebuffer *fb = plane_state->hw.fb;
2661         unsigned int rotation = plane_state->hw.rotation;
2662         u32 stride, max_stride;
2663
2664         /*
2665          * No remapping for invisible planes since we don't have
2666          * an actual source viewport to remap.
2667          */
2668         if (!plane_state->uapi.visible)
2669                 return false;
2670
2671         if (!intel_plane_can_remap(plane_state))
2672                 return false;
2673
2674         /*
2675          * FIXME: aux plane limits on gen9+ are
2676          * unclear in Bspec, for now no checking.
2677          */
2678         stride = intel_fb_pitch(fb, 0, rotation);
2679         max_stride = plane->max_stride(plane, fb->format->format,
2680                                        fb->modifier, rotation);
2681
2682         return stride > max_stride;
2683 }
2684
/*
 * Compute the derived layout information for each color plane of @fb:
 * the normal-view x/y offsets (intel_fb->normal[]), the rotated-view
 * x/y offsets and pitch (intel_fb->rotated[]), and the rotation info
 * used to build the rotated GTT mapping. Also validates the fb layout
 * (offset alignment, CCS/main surface agreement, fence compatibility,
 * bo size). Returns 0 on success or a negative error code.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;
        unsigned int max_size = 0;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                cpp = fb->format->cpp[i];
                width = drm_framebuffer_plane_width(fb->width, fb, i);
                height = drm_framebuffer_plane_height(fb->height, fb, i);

                /* Convert the plane's byte offset into x/y coordinates. */
                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return ret;
                }

                if (is_ccs_modifier(fb->modifier) && i == 1) {
                        int hsub = fb->format->hsub;
                        int vsub = fb->format->vsub;
                        int tile_width, tile_height;
                        int main_x, main_y;
                        int ccs_x, ccs_y;

                        /* Scale the CCS tile up to main surface coordinates. */
                        intel_tile_dims(fb, i, &tile_width, &tile_height);
                        tile_width *= hsub;
                        tile_height *= vsub;

                        ccs_x = (x * hsub) % tile_width;
                        ccs_y = (y * vsub) % tile_height;
                        main_x = intel_fb->normal[0].x % tile_width;
                        main_y = intel_fb->normal[0].y % tile_height;

                        /*
                         * CCS doesn't have its own x/y offset register, so the intra CCS tile
                         * x/y offsets must match between CCS and the main surface.
                         */
                        if (main_x != ccs_x || main_y != ccs_y) {
                                DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                                              main_x, main_y,
                                              ccs_x, ccs_y,
                                              intel_fb->normal[0].x,
                                              intel_fb->normal[0].y,
                                              x, y);
                                return -EINVAL;
                        }
                }

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                /* Tile-align the plane start; the remainder stays in x/y. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
                                                      fb->pitches[i],
                                                      DRM_MODE_ROTATE_0,
                                                      tile_size);
                offset /= tile_size;

                if (!is_surface_linear(fb->modifier, i)) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        /* Plane dimensions for the rotated view, in tiles. */
                        rot_info->plane[i].offset = offset;
                        rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
                        rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                        rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                        intel_fb->rotated[i].pitch =
                                rot_info->plane[i].height * tile_height;

                        /* how many tiles does this plane need */
                        size = rot_info->plane[i].stride * rot_info->plane[i].height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        /* rotate the x/y offsets to match the GTT view */
                        drm_rect_init(&r, x, y, width, height);
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotate the tile dimensions to match the GTT view */
                        pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
                        swap(tile_width, tile_height);

                        /*
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
                        intel_adjust_tile_offset(&x, &y,
                                                 tile_width, tile_height,
                                                 tile_size, pitch_tiles,
                                                 gtt_offset_rotated * tile_size, 0);

                        /* The next plane starts after this one's tiles. */
                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

                        /*
                         * First pixel of the framebuffer from
                         * the start of the rotated gtt mapping.
                         */
                        intel_fb->rotated[i].x = x;
                        intel_fb->rotated[i].y = y;
                } else {
                        /* Linear: size in tiles of the area this plane spans. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
                              mul_u32_u32(max_size, tile_size), obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2846
/*
 * Set up a remapped/rotated GGTT view for the plane: switch
 * plane_state->view to ROTATED or REMAPPED, make the src coordinates
 * viewport-relative, and recompute each color plane's stride and x/y
 * offsets to point into the remapped mapping.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        struct drm_framebuffer *fb = plane_state->hw.fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *info = &plane_state->view.rotated;
        unsigned int rotation = plane_state->hw.rotation;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);
        unsigned int src_x, src_y;
        unsigned int src_w, src_h;
        u32 gtt_offset = 0;

        memset(&plane_state->view, 0, sizeof(plane_state->view));
        plane_state->view.type = drm_rotation_90_or_270(rotation) ?
                I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

        /* src coordinates are in 16.16 fixed point */
        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;
        src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
        src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

        /* CCS can't be remapped; callers must have filtered it out. */
        WARN_ON(is_ccs_modifier(fb->modifier));

        /* Make src coordinates relative to the viewport */
        drm_rect_translate(&plane_state->uapi.src,
                           -(src_x << 16), -(src_y << 16));

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->uapi.src,
                                src_w << 16, src_h << 16,
                                DRM_MODE_ROTATE_270);

        for (i = 0; i < num_planes; i++) {
                /* Subsampling only applies to color planes > 0. */
                unsigned int hsub = i ? fb->format->hsub : 1;
                unsigned int vsub = i ? fb->format->vsub : 1;
                unsigned int cpp = fb->format->cpp[i];
                unsigned int tile_width, tile_height;
                unsigned int width, height;
                unsigned int pitch_tiles;
                unsigned int x, y;
                u32 offset;

                intel_tile_dims(fb, i, &tile_width, &tile_height);

                x = src_x / hsub;
                y = src_y / vsub;
                width = src_w / hsub;
                height = src_h / vsub;

                /*
                 * First pixel of the src viewport from the
                 * start of the normal gtt mapping.
                 */
                x += intel_fb->normal[i].x;
                y += intel_fb->normal[i].y;

                /* Tile-align the viewport start; remainder stays in x/y. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y,
                                                      fb, i, fb->pitches[i],
                                                      DRM_MODE_ROTATE_0, tile_size);
                offset /= tile_size;

                /* Dimensions of the remapped view, in tiles. */
                info->plane[i].offset = offset;
                info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
                                                     tile_width * cpp);
                info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        struct drm_rect r;

                        /* rotate the x/y offsets to match the GTT view */
                        drm_rect_init(&r, x, y, width, height);
                        drm_rect_rotate(&r,
                                        info->plane[i].width * tile_width,
                                        info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        pitch_tiles = info->plane[i].height;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_height;

                        /* rotate the tile dimensions to match the GTT view */
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = info->plane[i].width;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
                }

                /*
                 * We only keep the x/y offsets, so push all of the
                 * gtt offset into the x/y offsets.
                 */
                intel_adjust_tile_offset(&x, &y,
                                         tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         gtt_offset * tile_size, 0);

                /* The next plane starts after this one's tiles. */
                gtt_offset += info->plane[i].width * info->plane[i].height;

                plane_state->color_plane[i].offset = 0;
                plane_state->color_plane[i].x = x;
                plane_state->color_plane[i].y = y;
        }
}
2956
/*
 * Compute the GGTT view and the per-color-plane stride/offset/x/y for
 * the plane: use the remapped/rotated path when the fb stride exceeds
 * the plane's limits, otherwise the normal (or 90/270 rotated) view
 * with the offsets precomputed by intel_fill_fb_info(). Returns 0 or
 * a negative error code if the resulting stride is still unusable.
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
        const struct intel_framebuffer *fb =
                to_intel_framebuffer(plane_state->hw.fb);
        unsigned int rotation = plane_state->hw.rotation;
        int i, num_planes;

        /* Nothing to do for a disabled plane (no fb). */
        if (!fb)
                return 0;

        num_planes = fb->base.format->num_planes;

        if (intel_plane_needs_remap(plane_state)) {
                intel_plane_remap_gtt(plane_state);

                /*
                 * Sometimes even remapping can't overcome
                 * the stride limitations :( Can happen with
                 * big plane sizes and suitably misaligned
                 * offsets.
                 */
                return intel_plane_check_stride(plane_state);
        }

        intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

        for (i = 0; i < num_planes; i++) {
                plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
                plane_state->color_plane[i].offset = 0;

                /* Use the x/y offsets precomputed by intel_fill_fb_info(). */
                if (drm_rotation_90_or_270(rotation)) {
                        plane_state->color_plane[i].x = fb->rotated[i].x;
                        plane_state->color_plane[i].y = fb->rotated[i].y;
                } else {
                        plane_state->color_plane[i].x = fb->normal[i].x;
                        plane_state->color_plane[i].y = fb->normal[i].y;
                }
        }

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->uapi.src,
                                fb->base.width << 16, fb->base.height << 16,
                                DRM_MODE_ROTATE_270);

        return intel_plane_check_stride(plane_state);
}
3005
/*
 * Translate a pre-skl DISPPLANE_* pixel format value to its drm fourcc.
 * Note the mid-switch default: label is deliberate — unknown hardware
 * values are treated the same as DISPPLANE_BGRX888 (XRGB8888).
 */
static int i9xx_format_to_fourcc(int format)
{
        switch (format) {
        case DISPPLANE_8BPP:
                return DRM_FORMAT_C8;
        case DISPPLANE_BGRA555:
                return DRM_FORMAT_ARGB1555;
        case DISPPLANE_BGRX555:
                return DRM_FORMAT_XRGB1555;
        case DISPPLANE_BGRX565:
                return DRM_FORMAT_RGB565;
        default:
        case DISPPLANE_BGRX888:
                return DRM_FORMAT_XRGB8888;
        case DISPPLANE_RGBX888:
                return DRM_FORMAT_XBGR8888;
        case DISPPLANE_BGRA888:
                return DRM_FORMAT_ARGB8888;
        case DISPPLANE_RGBA888:
                return DRM_FORMAT_ABGR8888;
        case DISPPLANE_BGRX101010:
                return DRM_FORMAT_XRGB2101010;
        case DISPPLANE_RGBX101010:
                return DRM_FORMAT_XBGR2101010;
        case DISPPLANE_BGRA101010:
                return DRM_FORMAT_ARGB2101010;
        case DISPPLANE_RGBA101010:
                return DRM_FORMAT_ABGR2101010;
        case DISPPLANE_RGBX161616:
                return DRM_FORMAT_XBGR16161616F;
        }
}
3038
/*
 * Translate a skl+ PLANE_CTL_FORMAT_* value to its drm fourcc. For the
 * RGB formats @rgb_order selects the BGR vs RGB variant and @alpha the
 * alpha-capable fourcc. Note the mid-switch default: label is
 * deliberate — unknown values are treated as PLANE_CTL_FORMAT_XRGB_8888.
 */
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
        switch (format) {
        case PLANE_CTL_FORMAT_RGB_565:
                return DRM_FORMAT_RGB565;
        case PLANE_CTL_FORMAT_NV12:
                return DRM_FORMAT_NV12;
        case PLANE_CTL_FORMAT_P010:
                return DRM_FORMAT_P010;
        case PLANE_CTL_FORMAT_P012:
                return DRM_FORMAT_P012;
        case PLANE_CTL_FORMAT_P016:
                return DRM_FORMAT_P016;
        case PLANE_CTL_FORMAT_Y210:
                return DRM_FORMAT_Y210;
        case PLANE_CTL_FORMAT_Y212:
                return DRM_FORMAT_Y212;
        case PLANE_CTL_FORMAT_Y216:
                return DRM_FORMAT_Y216;
        case PLANE_CTL_FORMAT_Y410:
                return DRM_FORMAT_XVYU2101010;
        case PLANE_CTL_FORMAT_Y412:
                return DRM_FORMAT_XVYU12_16161616;
        case PLANE_CTL_FORMAT_Y416:
                return DRM_FORMAT_XVYU16161616;
        default:
        case PLANE_CTL_FORMAT_XRGB_8888:
                if (rgb_order) {
                        if (alpha)
                                return DRM_FORMAT_ABGR8888;
                        else
                                return DRM_FORMAT_XBGR8888;
                } else {
                        if (alpha)
                                return DRM_FORMAT_ARGB8888;
                        else
                                return DRM_FORMAT_XRGB8888;
                }
        case PLANE_CTL_FORMAT_XRGB_2101010:
                if (rgb_order) {
                        if (alpha)
                                return DRM_FORMAT_ABGR2101010;
                        else
                                return DRM_FORMAT_XBGR2101010;
                } else {
                        if (alpha)
                                return DRM_FORMAT_ARGB2101010;
                        else
                                return DRM_FORMAT_XRGB2101010;
                }
        case PLANE_CTL_FORMAT_XRGB_16161616F:
                if (rgb_order) {
                        if (alpha)
                                return DRM_FORMAT_ABGR16161616F;
                        else
                                return DRM_FORMAT_XBGR16161616F;
                } else {
                        if (alpha)
                                return DRM_FORMAT_ARGB16161616F;
                        else
                                return DRM_FORMAT_XRGB16161616F;
                }
        }
}
3103
/*
 * Try to wrap the BIOS/GOP-programmed scanout buffer (described by
 * @plane_config) in a GEM object backed by the preallocated stolen
 * memory range, and initialize @plane_config->fb around it.
 *
 * Returns true if the initial framebuffer could be taken over,
 * false if the caller must fall back (share another CRTC's fb or
 * disable the plane).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        /* Round the surface out to whole pages for the stolen allocation. */
        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
        u32 size_aligned = round_up(plane_config->base + plane_config->size,
                                    PAGE_SIZE);
        struct drm_i915_gem_object *obj;
        bool ret = false;

        size_aligned -= base_aligned;

        if (plane_config->size == 0)
                return false;

        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
        if (size_aligned * 2 > dev_priv->stolen_usable_size)
                return false;

        /* Only modifiers the firmware could plausibly have used are taken. */
        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
        case I915_FORMAT_MOD_Y_TILED:
                break;
        default:
                DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
                                 fb->modifier);
                return false;
        }

        obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
                                                             base_aligned,
                                                             base_aligned,
                                                             size_aligned);
        if (IS_ERR(obj))
                return false;

        switch (plane_config->tiling) {
        case I915_TILING_NONE:
                break;
        case I915_TILING_X:
        case I915_TILING_Y:
                /* Mirror the firmware-programmed stride/tiling on the object. */
                obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
                break;
        default:
                MISSING_CASE(plane_config->tiling);
                goto out;
        }

        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out;
        }


        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
        ret = true;
out:
        /*
         * Dropped on success as well — presumably intel_framebuffer_init()
         * takes its own reference to the object; verify against its
         * implementation.
         */
        i915_gem_object_put(obj);
        return ret;
}
3178
3179 static void
3180 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3181                         struct intel_plane_state *plane_state,
3182                         bool visible)
3183 {
3184         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3185
3186         plane_state->uapi.visible = visible;
3187
3188         if (visible)
3189                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3190         else
3191                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3192 }
3193
/*
 * Rebuild crtc_state->active_planes (a mask of intel plane ids) from
 * the uapi plane_mask. Used to repair state taken over from firmware.
 */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        struct drm_plane *plane;

        /*
         * Active_planes aliases if multiple "primary" or cursor planes
         * have been used on the same (or wrong) pipe. plane_mask uses
         * unique ids, hence we can use that to reconstruct active_planes.
         */
        crtc_state->active_planes = 0;

        drm_for_each_plane_mask(plane, &dev_priv->drm,
                                crtc_state->uapi.plane_mask)
                crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
3210
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * updating the software state (visibility, active_planes, data rate,
 * min cdclk) to match before touching the hardware. Used when sanitizing
 * state inherited from firmware.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
                      plane->base.base.id, plane->base.name,
                      crtc->base.base.id, crtc->base.name);

        /* Software bookkeeping first, then the actual plane disable below. */
        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_active_planes(crtc_state);
        crtc_state->data_rate[plane->id] = 0;
        crtc_state->min_cdclk[plane->id] = 0;

        if (plane->id == PLANE_PRIMARY)
                intel_pre_disable_primary_noatomic(&crtc->base);

        intel_disable_plane(plane, crtc_state);
}
3233
3234 static struct intel_frontbuffer *
3235 to_intel_frontbuffer(struct drm_framebuffer *fb)
3236 {
3237         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3238 }
3239
/*
 * Take over the firmware-programmed framebuffer for @intel_crtc's
 * primary plane: either wrap the preallocated stolen memory in a new
 * fb, or share an already-reconstructed fb from another CRTC scanning
 * out the same GGTT address. If neither works, disable the primary
 * plane so we don't carry a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        /* Alloc failed: the pre-filled fb struct is ours to free. */
        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* Same GGTT address as our plane config => same surface. */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->hw.fb;
                        drm_framebuffer_get(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);

        return;

valid_fb:
        intel_state->hw.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->hw.rotation);
        intel_state->color_plane[0].stride =
                intel_fb_pitch(fb, 0, intel_state->hw.rotation);

        /* Pin the fb so it stays resident at the address being scanned out. */
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb,
                                           &intel_state->view,
                                           intel_plane_uses_fence(intel_state),
                                           &intel_state->flags);
        if (IS_ERR(intel_state->vma)) {
                DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
                          intel_crtc->pipe, PTR_ERR(intel_state->vma));

                intel_state->vma = NULL;
                drm_framebuffer_put(fb);
                return;
        }

        intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

        /* src coordinates are 16.16 fixed point, crtc coordinates are int. */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->uapi.src = drm_plane_state_src(plane_state);
        intel_state->uapi.dst = drm_plane_state_dest(plane_state);

        if (plane_config->tiling)
                dev_priv->preserve_bios_swizzle = true;

        /* fb reference is handed over to plane_state->fb here. */
        plane_state->fb = fb;
        plane_state->crtc = &intel_crtc->base;
        intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &to_intel_frontbuffer(fb)->bits);
}
3345
3346 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3347                                int color_plane,
3348                                unsigned int rotation)
3349 {
3350         int cpp = fb->format->cpp[color_plane];
3351
3352         switch (fb->modifier) {
3353         case DRM_FORMAT_MOD_LINEAR:
3354         case I915_FORMAT_MOD_X_TILED:
3355                 /*
3356                  * Validated limit is 4k, but has 5k should
3357                  * work apart from the following features:
3358                  * - Ytile (already limited to 4k)
3359                  * - FP16 (already limited to 4k)
3360                  * - render compression (already limited to 4k)
3361                  * - KVMR sprite and cursor (don't care)
3362                  * - horizontal panning (TODO verify this)
3363                  * - pipe and plane scaling (TODO verify this)
3364                  */
3365                 if (cpp == 8)
3366                         return 4096;
3367                 else
3368                         return 5120;
3369         case I915_FORMAT_MOD_Y_TILED_CCS:
3370         case I915_FORMAT_MOD_Yf_TILED_CCS:
3371                 /* FIXME AUX plane? */
3372         case I915_FORMAT_MOD_Y_TILED:
3373         case I915_FORMAT_MOD_Yf_TILED:
3374                 if (cpp == 8)
3375                         return 2048;
3376                 else
3377                         return 4096;
3378         default:
3379                 MISSING_CASE(fb->modifier);
3380                 return 2048;
3381         }
3382 }
3383
3384 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3385                                int color_plane,
3386                                unsigned int rotation)
3387 {
3388         int cpp = fb->format->cpp[color_plane];
3389
3390         switch (fb->modifier) {
3391         case DRM_FORMAT_MOD_LINEAR:
3392         case I915_FORMAT_MOD_X_TILED:
3393                 if (cpp == 8)
3394                         return 4096;
3395                 else
3396                         return 5120;
3397         case I915_FORMAT_MOD_Y_TILED_CCS:
3398         case I915_FORMAT_MOD_Yf_TILED_CCS:
3399                 /* FIXME AUX plane? */
3400         case I915_FORMAT_MOD_Y_TILED:
3401         case I915_FORMAT_MOD_Yf_TILED:
3402                 if (cpp == 8)
3403                         return 2048;
3404                 else
3405                         return 5120;
3406         default:
3407                 MISSING_CASE(fb->modifier);
3408                 return 2048;
3409         }
3410 }
3411
/*
 * Max plane width on icl+: 5120 pixels for all modifiers/formats seen
 * here. Parameters are kept for signature symmetry with the skl/glk
 * variants chosen at the call site.
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
                               int color_plane,
                               unsigned int rotation)
{
        return 5120;
}
3418
/* Max plane height in scanlines on pre-icl (gen9/gen10). */
static int skl_max_plane_height(void)
{
        enum { SKL_MAX_PLANE_HEIGHT = 4096 };

        return SKL_MAX_PLANE_HEIGHT;
}
3423
/* Max plane height in scanlines on icl+ (enough for 8k/4320p modes). */
static int icl_max_plane_height(void)
{
        enum { ICL_MAX_PLANE_HEIGHT = 4320 };

        return ICL_MAX_PLANE_HEIGHT;
}
3428
/*
 * CCS AUX surfaces have no x/y offset registers of their own, so the
 * AUX x/y must end up identical to the main surface's. Walk the AUX
 * surface base offset backwards (one alignment step at a time, which
 * shifts the residual x/y forwards) until the coordinates match or we
 * run out of room. On success the adjusted AUX offset/x/y are written
 * back into color_plane[1] and true is returned.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
                                           int main_x, int main_y, u32 main_offset)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int aux_x = plane_state->color_plane[1].x;
        int aux_y = plane_state->color_plane[1].y;
        u32 aux_offset = plane_state->color_plane[1].offset;
        u32 alignment = intel_surf_alignment(fb, 1);

        /* AUX offset must stay >= main offset and can't overshoot in y. */
        while (aux_offset >= main_offset && aux_y <= main_y) {
                int x, y;

                if (aux_x == main_x && aux_y == main_y)
                        break;

                if (aux_offset == 0)
                        break;

                /* Work in AUX (subsampled) units for the offset adjustment. */
                x = aux_x / hsub;
                y = aux_y / vsub;
                aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
                                                               aux_offset, aux_offset - alignment);
                aux_x = x * hsub + aux_x % hsub;
                aux_y = y * vsub + aux_y % vsub;
        }

        if (aux_x != main_x || aux_y != main_y)
                return false;

        plane_state->color_plane[1].offset = aux_offset;
        plane_state->color_plane[1].x = aux_x;
        plane_state->color_plane[1].y = aux_y;

        return true;
}
3466
/*
 * Validate and finalize the main (Y/RGB) surface parameters for a skl+
 * plane: enforce the per-platform max width/height, compute the aligned
 * surface offset and x/y, and massage the offset to satisfy the X-tiling
 * stride restriction and the CCS AUX coordinate-matching requirement.
 * Assumes the AUX surface (color_plane[1]) has already been set up.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        /* src coordinates are 16.16 fixed point; convert to whole pixels. */
        int x = plane_state->uapi.src.x1 >> 16;
        int y = plane_state->uapi.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->uapi.src) >> 16;
        int h = drm_rect_height(&plane_state->uapi.src) >> 16;
        int max_width;
        int max_height;
        u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

        if (INTEL_GEN(dev_priv) >= 11)
                max_width = icl_max_plane_width(fb, 0, rotation);
        else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                max_width = glk_max_plane_width(fb, 0, rotation);
        else
                max_width = skl_max_plane_width(fb, 0, rotation);

        if (INTEL_GEN(dev_priv) >= 11)
                max_height = icl_max_plane_height();
        else
                max_height = skl_max_plane_height();

        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
        alignment = intel_surf_alignment(fb, 0);

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                           offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
                int cpp = fb->format->cpp[0];

                /* Walk the offset back until x + w fits within the stride. */
                while ((x + w) * cpp > plane_state->color_plane[0].stride) {
                        if (offset == 0) {
                                DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
                                return -EINVAL;
                        }

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }
        }

        /*
         * CCS AUX surface doesn't have its own x/y offsets, we must make sure
         * they match with the main surface x/y offsets.
         */
        if (is_ccs_modifier(fb->modifier)) {
                while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
                        if (offset == 0)
                                break;

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }

                if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
                        DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
                        return -EINVAL;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = x;
        plane_state->color_plane[0].y = y;

        /*
         * Put the final coordinates back so that the src
         * coordinate checks will see the right values.
         */
        drm_rect_translate_to(&plane_state->uapi.src,
                              x << 16, y << 16);

        return 0;
}
3563
/*
 * Set up the chroma (CbCr) surface of a YUV semiplanar (NV12-style) fb:
 * size-check the plane and compute its aligned offset and x/y into
 * color_plane[1].
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        int max_width = skl_max_plane_width(fb, 1, rotation);
        int max_height = 4096;
        /*
         * >> 17: drop the 16.16 fixed-point fraction and halve for the
         * 2x subsampled chroma plane.
         */
        int x = plane_state->uapi.src.x1 >> 17;
        int y = plane_state->uapi.src.y1 >> 17;
        int w = drm_rect_width(&plane_state->uapi.src) >> 17;
        int h = drm_rect_height(&plane_state->uapi.src) >> 17;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

        /* FIXME not quite sure how/if these apply to the chroma plane */
        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        plane_state->color_plane[1].offset = offset;
        plane_state->color_plane[1].x = x;
        plane_state->color_plane[1].y = y;

        return 0;
}
3592
/*
 * Set up the CCS AUX surface: compute its aligned offset and x/y (in
 * full-resolution units, preserving the sub-block remainder) into
 * color_plane[1]. The main surface setup later forces its own x/y to
 * match these.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int src_x = plane_state->uapi.src.x1 >> 16;
        int src_y = plane_state->uapi.src.y1 >> 16;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        /* Convert to AUX (subsampled) units for the offset computation. */
        int x = src_x / hsub;
        int y = src_y / vsub;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

        plane_state->color_plane[1].offset = offset;
        /* Back to full-res units, re-adding the intra-block remainder. */
        plane_state->color_plane[1].x = x * hsub + src_x % hsub;
        plane_state->color_plane[1].y = y * vsub + src_y % vsub;

        return 0;
}
3613
/*
 * Compute the final surface parameters (offsets, x/y, GTT mapping) for
 * a skl+ plane. AUX surface setup runs first because the main surface
 * offset adjustment depends on the AUX offset. Returns 0 on success or
 * a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int ret;

        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
                return ret;

        /* Nothing else to compute for an invisible plane. */
        if (!plane_state->uapi.visible)
                return 0;

        /*
         * Handle the AUX surface first since
         * the main surface setup depends on it.
         */
        if (drm_format_info_is_yuv_semiplanar(fb->format)) {
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else if (is_ccs_modifier(fb->modifier)) {
                ret = skl_check_ccs_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else {
                /* No AUX surface: park the offset at a sentinel value. */
                plane_state->color_plane[1].offset = ~0xfff;
                plane_state->color_plane[1].x = 0;
                plane_state->color_plane[1].y = 0;
        }

        ret = skl_check_main_surface(plane_state);
        if (ret)
                return ret;

        return 0;
}
3650
3651 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
3652                              const struct intel_plane_state *plane_state,
3653                              unsigned int *num, unsigned int *den)
3654 {
3655         const struct drm_framebuffer *fb = plane_state->hw.fb;
3656         unsigned int cpp = fb->format->cpp[0];
3657
3658         /*
3659          * g4x bspec says 64bpp pixel rate can't exceed 80%
3660          * of cdclk when the sprite plane is enabled on the
3661          * same pipe. ilk/snb bspec says 64bpp pixel rate is
3662          * never allowed to exceed 80% of cdclk. Let's just go
3663          * with the ilk/snb limit always.
3664          */
3665         if (cpp == 8) {
3666                 *num = 10;
3667                 *den = 8;
3668         } else {
3669                 *num = 1;
3670                 *den = 1;
3671         }
3672 }
3673
/*
 * Minimum cdclk required so this pre-skl primary plane can keep up
 * with the pipe's pixel rate, per the format-dependent ratio from
 * i9xx_plane_ratio().
 */
static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
                                const struct intel_plane_state *plane_state)
{
        unsigned int pixel_rate;
        unsigned int num, den;

        /*
         * Note that crtc_state->pixel_rate accounts for both
         * horizontal and vertical panel fitter downscaling factors.
         * Pre-HSW bspec tells us to only consider the horizontal
         * downscaling factor here. We ignore that and just consider
         * both for simplicity.
         */
        pixel_rate = crtc_state->pixel_rate;

        i9xx_plane_ratio(crtc_state, plane_state, &num, &den);

        /* two pixels per clock with double wide pipe */
        if (crtc_state->double_wide)
                den *= 2;

        return DIV_ROUND_UP(pixel_rate * num, den);
}
3697
3698 unsigned int
3699 i9xx_plane_max_stride(struct intel_plane *plane,
3700                       u32 pixel_format, u64 modifier,
3701                       unsigned int rotation)
3702 {
3703         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3704
3705         if (!HAS_GMCH(dev_priv)) {
3706                 return 32*1024;
3707         } else if (INTEL_GEN(dev_priv) >= 4) {
3708                 if (modifier == I915_FORMAT_MOD_X_TILED)
3709                         return 16*1024;
3710                 else
3711                         return 32*1024;
3712         } else if (INTEL_GEN(dev_priv) >= 3) {
3713                 if (modifier == I915_FORMAT_MOD_X_TILED)
3714                         return 8*1024;
3715                 else
3716                         return 16*1024;
3717         } else {
3718                 if (plane->i9xx_plane == PLANE_C)
3719                         return 4*1024;
3720                 else
3721                         return 8*1024;
3722         }
3723 }
3724
/*
 * Compute the CRTC-state dependent bits of the DSPCNTR register value
 * (gamma, pipe CSC, and on pre-ilk the pipe-select field). The
 * plane-state dependent bits come from i9xx_plane_ctl().
 */
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 dspcntr = 0;

        if (crtc_state->gamma_enable)
                dspcntr |= DISPPLANE_GAMMA_ENABLE;

        if (crtc_state->csc_enable)
                dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

        /* Pre-ilk planes are assignable to a pipe via this field. */
        if (INTEL_GEN(dev_priv) < 5)
                dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

        return dspcntr;
}
3742
/*
 * Compute the plane-state dependent bits of the DSPCNTR register value:
 * enable bit, trickle feed, pixel format, tiling and rotation/mirror.
 * Returns 0 (plane disabled) for an unsupported pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        u32 dspcntr;

        dspcntr = DISPLAY_PLANE_ENABLE;

        if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
            IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

        /* Map the DRM fourcc to the hardware format field. */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                dspcntr |= DISPPLANE_8BPP;
                break;
        case DRM_FORMAT_XRGB1555:
                dspcntr |= DISPPLANE_BGRX555;
                break;
        case DRM_FORMAT_ARGB1555:
                dspcntr |= DISPPLANE_BGRA555;
                break;
        case DRM_FORMAT_RGB565:
                dspcntr |= DISPPLANE_BGRX565;
                break;
        case DRM_FORMAT_XRGB8888:
                dspcntr |= DISPPLANE_BGRX888;
                break;
        case DRM_FORMAT_XBGR8888:
                dspcntr |= DISPPLANE_RGBX888;
                break;
        case DRM_FORMAT_ARGB8888:
                dspcntr |= DISPPLANE_BGRA888;
                break;
        case DRM_FORMAT_ABGR8888:
                dspcntr |= DISPPLANE_RGBA888;
                break;
        case DRM_FORMAT_XRGB2101010:
                dspcntr |= DISPPLANE_BGRX101010;
                break;
        case DRM_FORMAT_XBGR2101010:
                dspcntr |= DISPPLANE_RGBX101010;
                break;
        case DRM_FORMAT_ARGB2101010:
                dspcntr |= DISPPLANE_BGRA101010;
                break;
        case DRM_FORMAT_ABGR2101010:
                dspcntr |= DISPPLANE_RGBA101010;
                break;
        case DRM_FORMAT_XBGR16161616F:
                dspcntr |= DISPPLANE_RGBX161616;
                break;
        default:
                MISSING_CASE(fb->format->format);
                return 0;
        }

        /* Only gen4+ primary planes support X-tiled scanout here. */
        if (INTEL_GEN(dev_priv) >= 4 &&
            fb->modifier == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;

        if (rotation & DRM_MODE_ROTATE_180)
                dspcntr |= DISPPLANE_ROTATE_180;

        if (rotation & DRM_MODE_REFLECT_X)
                dspcntr |= DISPPLANE_MIRROR;

        return dspcntr;
}
3815
3816 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3817 {
3818         struct drm_i915_private *dev_priv =
3819                 to_i915(plane_state->uapi.plane->dev);
3820         const struct drm_framebuffer *fb = plane_state->hw.fb;
3821         int src_x, src_y, src_w;
3822         u32 offset;
3823         int ret;
3824
3825         ret = intel_plane_compute_gtt(plane_state);
3826         if (ret)
3827                 return ret;
3828
3829         if (!plane_state->uapi.visible)
3830                 return 0;
3831
3832         src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3833         src_x = plane_state->uapi.src.x1 >> 16;
3834         src_y = plane_state->uapi.src.y1 >> 16;
3835
3836         /* Undocumented hardware limit on i965/g4x/vlv/chv */
3837         if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
3838                 return -EINVAL;
3839
3840         intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3841
3842         if (INTEL_GEN(dev_priv) >= 4)
3843                 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3844                                                             plane_state, 0);
3845         else
3846                 offset = 0;
3847
3848         /*
3849          * Put the final coordinates back so that the src
3850          * coordinate checks will see the right values.
3851          */
3852         drm_rect_translate_to(&plane_state->uapi.src,
3853                               src_x << 16, src_y << 16);
3854
3855         /* HSW/BDW do this automagically in hardware */
3856         if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3857                 unsigned int rotation = plane_state->hw.rotation;
3858                 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3859                 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
3860
3861                 if (rotation & DRM_MODE_ROTATE_180) {
3862                         src_x += src_w - 1;
3863                         src_y += src_h - 1;
3864                 } else if (rotation & DRM_MODE_REFLECT_X) {
3865                         src_x += src_w - 1;
3866                 }
3867         }
3868
3869         plane_state->color_plane[0].offset = offset;
3870         plane_state->color_plane[0].x = src_x;
3871         plane_state->color_plane[0].y = src_y;
3872
3873         return 0;
3874 }
3875
3876 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
3877 {
3878         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3879         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3880
3881         if (IS_CHERRYVIEW(dev_priv))
3882                 return i9xx_plane == PLANE_B;
3883         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3884                 return false;
3885         else if (IS_GEN(dev_priv, 4))
3886                 return i9xx_plane == PLANE_C;
3887         else
3888                 return i9xx_plane == PLANE_B ||
3889                         i9xx_plane == PLANE_C;
3890 }
3891
/*
 * Atomic check for a pre-skl primary plane: validate rotation, clip the
 * plane against the crtc (no scaling supported), finalize the surface
 * layout, and precompute the DSPCNTR value used at commit time.
 *
 * Returns 0 on success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	/* Reject rotation+reflection combinations CHV can't do. */
	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* No scaling; partial-pipe placement only where the hw has windowing. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* Nothing more to compute for an invisible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
3927
/*
 * Program the pre-skl primary plane registers from a fully computed
 * plane state and arm the update.
 *
 * The whole sequence runs under the uncore lock with _FW accessors so
 * the register writes aren't interleaved with other mmio; the write
 * order at the end (DSPCNTR immediately before DSPSURF/DSPADDR) is
 * deliberate and must not be changed.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine the precomputed plane bits with the crtc-dependent ones. */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen4+ uses the tile-aligned offset, gen2/3 a linear byte offset. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* HSW/BDW take a packed x/y offset instead of lin/tile offsets. */
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3998
/*
 * Disable a pre-skl primary plane. The DSPCNTR write (without the
 * enable bit) is armed by the following DSPSURF/DSPADDR write, same
 * self-arming scheme as the update path.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4029
/*
 * Read back the hardware enable state of a pre-skl primary plane.
 *
 * Returns true if the plane is enabled and stores the pipe it is
 * currently assigned to in *pipe (on gen2-4 the plane itself selects
 * the pipe via DSPCNTR; on ilk+ the mapping is fixed).
 *
 * Returns false (and leaves *pipe untouched) if the power domain is
 * off, since the register can't be read then anyway.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4064
4065 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4066 {
4067         struct drm_device *dev = intel_crtc->base.dev;
4068         struct drm_i915_private *dev_priv = to_i915(dev);
4069
4070         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4071         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4072         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4073 }
4074
4075 /*
4076  * This function detaches (aka. unbinds) unused scalers in hardware
4077  */
4078 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4079 {
4080         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4081         const struct intel_crtc_scaler_state *scaler_state =
4082                 &crtc_state->scaler_state;
4083         int i;
4084
4085         /* loop through and disable scalers that aren't in use */
4086         for (i = 0; i < intel_crtc->num_scalers; i++) {
4087                 if (!scaler_state->scalers[i].in_use)
4088                         skl_detach_scaler(intel_crtc, i);
4089         }
4090 }
4091
4092 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
4093                                           int color_plane, unsigned int rotation)
4094 {
4095         /*
4096          * The stride is either expressed as a multiple of 64 bytes chunks for
4097          * linear buffers or in number of tiles for tiled buffers.
4098          */
4099         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
4100                 return 64;
4101         else if (drm_rotation_90_or_270(rotation))
4102                 return intel_tile_height(fb, color_plane);
4103         else
4104                 return intel_tile_width_bytes(fb, color_plane);
4105 }
4106
4107 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4108                      int color_plane)
4109 {
4110         const struct drm_framebuffer *fb = plane_state->hw.fb;
4111         unsigned int rotation = plane_state->hw.rotation;
4112         u32 stride = plane_state->color_plane[color_plane].stride;
4113
4114         if (color_plane >= fb->format->num_planes)
4115                 return 0;
4116
4117         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4118 }
4119
/*
 * Translate a DRM fourcc into the skl+ PLANE_CTL format (and RGB/BGR
 * byte order) bits. Unknown formats trip MISSING_CASE() and fall
 * through to 0.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	/* Packed YUV 4:2:2 variants, distinguished by component order. */
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	/* Planar and high-bit-depth YUV formats. */
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4179
4180 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4181 {
4182         if (!plane_state->hw.fb->format->has_alpha)
4183                 return PLANE_CTL_ALPHA_DISABLE;
4184
4185         switch (plane_state->hw.pixel_blend_mode) {
4186         case DRM_MODE_BLEND_PIXEL_NONE:
4187                 return PLANE_CTL_ALPHA_DISABLE;
4188         case DRM_MODE_BLEND_PREMULTI:
4189                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4190         case DRM_MODE_BLEND_COVERAGE:
4191                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4192         default:
4193                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4194                 return PLANE_CTL_ALPHA_DISABLE;
4195         }
4196 }
4197
4198 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4199 {
4200         if (!plane_state->hw.fb->format->has_alpha)
4201                 return PLANE_COLOR_ALPHA_DISABLE;
4202
4203         switch (plane_state->hw.pixel_blend_mode) {
4204         case DRM_MODE_BLEND_PIXEL_NONE:
4205                 return PLANE_COLOR_ALPHA_DISABLE;
4206         case DRM_MODE_BLEND_PREMULTI:
4207                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4208         case DRM_MODE_BLEND_COVERAGE:
4209                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4210         default:
4211                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4212                 return PLANE_COLOR_ALPHA_DISABLE;
4213         }
4214 }
4215
/*
 * Translate a framebuffer modifier into skl+ PLANE_CTL tiling bits,
 * including the render-decompression enable for the CCS modifiers.
 * Linear maps to 0; unknown modifiers trip MISSING_CASE().
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4237
4238 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4239 {
4240         switch (rotate) {
4241         case DRM_MODE_ROTATE_0:
4242                 break;
4243         /*
4244          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4245          * while i915 HW rotation is clockwise, thats why this swapping.
4246          */
4247         case DRM_MODE_ROTATE_90:
4248                 return PLANE_CTL_ROTATE_270;
4249         case DRM_MODE_ROTATE_180:
4250                 return PLANE_CTL_ROTATE_180;
4251         case DRM_MODE_ROTATE_270:
4252                 return PLANE_CTL_ROTATE_90;
4253         default:
4254                 MISSING_CASE(rotate);
4255         }
4256
4257         return 0;
4258 }
4259
4260 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4261 {
4262         switch (reflect) {
4263         case 0:
4264                 break;
4265         case DRM_MODE_REFLECT_X:
4266                 return PLANE_CTL_FLIP_HORIZONTAL;
4267         case DRM_MODE_REFLECT_Y:
4268         default:
4269                 MISSING_CASE(reflect);
4270         }
4271
4272         return 0;
4273 }
4274
4275 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4276 {
4277         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4278         u32 plane_ctl = 0;
4279
4280         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4281                 return plane_ctl;
4282
4283         if (crtc_state->gamma_enable)
4284                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4285
4286         if (crtc_state->csc_enable)
4287                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4288
4289         return plane_ctl;
4290 }
4291
/*
 * Compute the plane-dependent part of the skl+ PLANE_CTL value:
 * enable, format, tiling, rotation/flip, colorkey mode, and (pre-glk
 * only) the alpha and YUV->RGB CSC controls that later gens moved to
 * PLANE_COLOR_CTL.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* Pre-glk: alpha/gamma/csc are still controlled from PLANE_CTL. */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip is a separate control, added on cnl+. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4330
4331 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4332 {
4333         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4334         u32 plane_color_ctl = 0;
4335
4336         if (INTEL_GEN(dev_priv) >= 11)
4337                 return plane_color_ctl;
4338
4339         if (crtc_state->gamma_enable)
4340                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4341
4342         if (crtc_state->csc_enable)
4343                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4344
4345         return plane_color_ctl;
4346 }
4347
/*
 * Compute the plane-dependent part of the glk+ PLANE_COLOR_CTL value:
 * alpha blending mode and the YUV->RGB conversion setup for YUV
 * framebuffers.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	/*
	 * Non-HDR planes use the fixed-function CSC modes; icl HDR
	 * planes handle YUV conversion via the input CSC instead.
	 */
	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4374
/*
 * Re-take over the display hardware and (optionally) re-commit a
 * previously duplicated atomic state, e.g. after a GPU reset or
 * resume.
 *
 * Returns 0 on success or the commit error; -EDEADLK is unexpected
 * here because the caller already holds all modeset locks.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	/* Re-read what the hardware is actually doing right now. */
	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
4413
4414 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4415 {
4416         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4417                 intel_has_gpu_reset(&dev_priv->gt));
4418 }
4419
/*
 * Called before a GPU reset that will clobber the display: take all
 * modeset locks, snapshot the current atomic state into
 * dev_priv->modeset_restore_state, and disable all crtcs so the reset
 * doesn't fight an in-flight modeset. intel_finish_reset() restores
 * the saved state and drops the locks afterwards.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until no -EDEADLK backoff is needed. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stashed for intel_finish_reset(); locks stay held across the reset. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4476
/*
 * Counterpart of intel_prepare_reset(): re-initialize the display if
 * the reset clobbered it, re-commit the atomic state saved before the
 * reset, and release the modeset locks taken in the prepare step.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts lost in the reset. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4527
/*
 * Apply the icl PIPE_CHICKEN workaround bits for this crtc's pipe
 * (read-modify-write, preserving the remaining register contents).
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = I915_READ(PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
4551
/*
 * Enable Transcoder Port Sync on a slave transcoder, pointing it at
 * its master. No-op for masters/non-synced crtcs (master_transcoder
 * is INVALID_TRANSCODER for those).
 */
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 trans_ddi_func_ctl2_val;
	u8 master_select;

	/*
	 * Configure the master select and enable Transcoder Port Sync for
	 * Slave CRTCs transcoder.
	 */
	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
		return;

	/* Master select encoding: 0 = EDP, otherwise transcoder index + 1. */
	if (crtc_state->master_transcoder == TRANSCODER_EDP)
		master_select = 0;
	else
		master_select = crtc_state->master_transcoder + 1;

	/* Set the master select bits for Tranascoder Port Sync */
	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
	/* Enable Transcoder Port Sync */
	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;

	/*
	 * NOTE(review): whole-register write, not RMW — assumes all other
	 * TRANS_DDI_FUNC_CTL2 bits should be 0 here; verify against bspec.
	 */
	I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
		   trans_ddi_func_ctl2_val);
}
4581
4582 static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
4583 {
4584         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4585         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4586         i915_reg_t reg;
4587         u32 trans_ddi_func_ctl2_val;
4588
4589         if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
4590                 return;
4591
4592         DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
4593                       transcoder_name(old_crtc_state->cpu_transcoder));
4594
4595         reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
4596         trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
4597                                     PORT_SYNC_MODE_MASTER_SELECT_MASK);
4598         I915_WRITE(reg, trans_ddi_func_ctl2_val);
4599 }
4600
/*
 * Switch both ends of the FDI link (CPU TX and PCH RX) from a training
 * pattern to the normal pixel-data train, with enhanced framing, after
 * link training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses a separate train-select field in FDI_TX_CTL. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses its own train pattern field on the RX side. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4641
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs the two-phase FDI training handshake: pattern 1 until the PCH RX
 * reports bit lock, then pattern 2 until it reports symbol lock.  Each
 * phase polls FDI_RX_IIR up to 5 times; failure is only logged, not
 * propagated.  Requires the pipe to be running already (FDI needs bits
 * from the pipe first).
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; write the bit back to ack it (presumably
	 * write-1-to-clear IIR semantics -- matches the pattern used
	 * throughout these training functions). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock, same ack-by-writeback scheme as train 1 */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4735
/*
 * FDI TX voltage-swing / pre-emphasis settings (SNB B-step encoding),
 * tried in array order by the gen6 and IVB training loops below --
 * presumably ordered from weakest to strongest drive; confirm against
 * the FDI_LINK_TRAIN_*_SNB_B register definitions.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4742
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-phase handshake as the ILK version, but additionally steps
 * through the four snb_b_fdi_train_param[] vswing/pre-emphasis levels
 * (outer loop i), polling FDI_RX_IIR up to 5 times per level (inner
 * loop retry).  Failure is only logged, not propagated.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B: start from the lowest vswing entry */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses its own train-pattern field encoding */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Train 1: step through the vswing table until bit lock is seen */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* ack the lock bit by writing it back */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on bit lock */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B: reset to the lowest vswing entry for phase 2 */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Train 2: same vswing sweep, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4875
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Unlike the ILK/SNB variants, this retries the whole train-1 +
 * train-2 handshake from scratch: each vswing/pre-emphasis entry is
 * attempted twice (j counts to 2 * ARRAY_SIZE, j/2 indexes the table),
 * with TX and RX disabled between attempts.  Success jumps straight to
 * train_done; exhausting all attempts is only logged, not propagated.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each table entry is attempted on two iterations */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* re-read IIR once in case the lock bit set
			 * between the first read and the check */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4995
/*
 * Enable the FDI PLLs for the pipe in @crtc_state: first the PCH-side
 * FDI RX PLL (with lane count and BPC taken from the pipe config),
 * then switch RX to PCDclk, then the CPU-side FDI TX PLL.  Each step
 * is followed by a posting read plus a fixed warmup delay.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* mirror the pipe's BPC setting into FDI RX (shifted up by 11) */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
5032
/*
 * Disable the FDI PLLs for @intel_crtc's pipe, reversing
 * ironlake_fdi_pll_enable(): switch RX back to Rawclk, disable the CPU
 * TX PLL, then the PCH RX PLL, waiting for the clocks to settle.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
5062
/*
 * Disable the FDI link for @crtc's pipe: turn off CPU FDI TX and PCH
 * FDI RX, then leave both sides parked in train pattern 1 (with the
 * RX BPC field kept consistent with PIPECONF) ready for a later
 * re-train.  The PLLs are left running; see ironlake_fdi_pll_disable().
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
5115
5116 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5117 {
5118         struct drm_crtc *crtc;
5119         bool cleanup_done;
5120
5121         drm_for_each_crtc(crtc, &dev_priv->drm) {
5122                 struct drm_crtc_commit *commit;
5123                 spin_lock(&crtc->commit_lock);
5124                 commit = list_first_entry_or_null(&crtc->commit_list,
5125                                                   struct drm_crtc_commit, commit_entry);
5126                 cleanup_done = commit ?
5127                         try_wait_for_completion(&commit->cleanup_done) : true;
5128                 spin_unlock(&crtc->commit_lock);
5129
5130                 if (cleanup_done)
5131                         continue;
5132
5133                 drm_crtc_wait_one_vblank(crtc);
5134
5135                 return true;
5136         }
5137
5138         return false;
5139 }
5140
/*
 * Disable the LPT iCLKIP clock: gate the pixel clock first, then set
 * the SSC disable bit in SBI_SSCCTL6 over the sideband interface
 * (serialized by sb_lock).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5155
/*
 * Program iCLKIP clock to the desired frequency.
 *
 * Derives the integer divisor (divsel), phase increment (phaseinc) and
 * auxiliary post-divider (auxdiv) that approximate the pipe's pixel
 * clock from the 172.8 MHz virtual root clock, programs them over the
 * sideband interface, then re-enables the modulator and ungates the
 * pixel clock.  The inverse computation lives in lpt_get_iclkip().
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* must be disabled while the dividers are reprogrammed */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock in in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* sideband accesses are serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5234
/*
 * Read back the currently programmed iCLKIP frequency (same units as
 * adjusted_mode.crtc_clock, i.e. kHz -- this is the inverse of
 * lpt_program_iclkip()).  Returns 0 if the pixel clock is gated or the
 * SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* reverse of the divisor decomposition done when programming */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5271
/*
 * Copy the CPU transcoder's horizontal/vertical timing registers to
 * the given PCH transcoder, so both sides of the link run identical
 * timings.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
						enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
5295
5296 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5297 {
5298         u32 temp;
5299
5300         temp = I915_READ(SOUTH_CHICKEN1);
5301         if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5302                 return;
5303
5304         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5305         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5306
5307         temp &= ~FDI_BC_BIFURCATION_SELECT;
5308         if (enable)
5309                 temp |= FDI_BC_BIFURCATION_SELECT;
5310
5311         DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
5312         I915_WRITE(SOUTH_CHICKEN1, temp);
5313         POSTING_READ(SOUTH_CHICKEN1);
5314 }
5315
5316 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5317 {
5318         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5319         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5320
5321         switch (crtc->pipe) {
5322         case PIPE_A:
5323                 break;
5324         case PIPE_B:
5325                 if (crtc_state->fdi_lanes > 2)
5326                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5327                 else
5328                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5329
5330                 break;
5331         case PIPE_C:
5332                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5333
5334                 break;
5335         default:
5336                 BUG();
5337         }
5338 }
5339
5340 /*
5341  * Finds the encoder associated with the given CRTC. This can only be
5342  * used when we know that the CRTC isn't feeding multiple encoders!
5343  */
5344 static struct intel_encoder *
5345 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5346                            const struct intel_crtc_state *crtc_state)
5347 {
5348         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5349         const struct drm_connector_state *connector_state;
5350         const struct drm_connector *connector;
5351         struct intel_encoder *encoder = NULL;
5352         int num_encoders = 0;
5353         int i;
5354
5355         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5356                 if (connector_state->crtc != &crtc->base)
5357                         continue;
5358
5359                 encoder = to_intel_encoder(connector_state->best_encoder);
5360                 num_encoders++;
5361         }
5362
5363         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5364              num_encoders, pipe_name(crtc->pipe));
5365
5366         return encoder;
5367 }
5368
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 *
 * The ordering here matters: bifurcation and TU size are set up before
 * FDI training, clock selection before the shared DPLL enable, and the
 * PCH transcoder is enabled last.
 */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		/* pick DPLL A or B for this transcoder based on which
		 * shared DPLL the state was assigned */
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		/* PIPECONF BPC field starts at bit 5 */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(crtc_state);
}
5460
/*
 * Enable the LPT PCH transcoder for @crtc_state.
 *
 * Note that everything here is programmed against PIPE_A: both the
 * disabled-state assert and the transcoder timings use PIPE_A
 * regardless of which CPU pipe is driving the output.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	/* Program the iCLKIP clock before touching transcoder timings. */
	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5477
/*
 * Verify that a pipe is actually running after a CPT mode set by
 * checking that its scanline counter (PIPEDSL) advances.
 */
static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once more before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
5491
5492 /*
5493  * The hardware phase 0.0 refers to the center of the pixel.
5494  * We want to start from the top/left edge which is phase
5495  * -0.5. That matches how the hardware calculates the scaling
5496  * factors (from top-left of the first pixel to bottom-right
5497  * of the last pixel, as opposed to the pixel centers).
5498  *
5499  * For 4:2:0 subsampled chroma planes we obviously have to
5500  * adjust that so that the chroma sample position lands in
5501  * the right spot.
5502  *
5503  * Note that for packed YCbCr 4:2:2 formats there is no way to
5504  * control chroma siting. The hardware simply replicates the
5505  * chroma samples for both of the luma samples, and thus we don't
5506  * actually get the expected MPEG2 chroma siting convention :(
5507  * The same behaviour is observed on pre-SKL platforms as well.
5508  *
5509  * Theory behind the formula (note that we ignore sub-pixel
5510  * source coordinates):
5511  * s = source sample position
5512  * d = destination sample position
5513  *
5514  * Downscaling 4:1:
5515  * -0.5
5516  * | 0.0
5517  * | |     1.5 (initial phase)
5518  * | |     |
5519  * v v     v
5520  * | s | s | s | s |
5521  * |       d       |
5522  *
5523  * Upscaling 1:4:
5524  * -0.5
5525  * | -0.375 (initial phase)
5526  * | |     0.0
5527  * | |     |
5528  * v v     v
5529  * |       s       |
5530  * | d | d | d | d |
5531  */
5532 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5533 {
5534         int phase = -0x8000;
5535         u16 trip = 0;
5536
5537         if (chroma_cosited)
5538                 phase += (sub - 1) * 0x8000 / sub;
5539
5540         phase += scale / (2 * sub);
5541
5542         /*
5543          * Hardware initial phase limited to [-0.5:1.5].
5544          * Since the max hardware scale factor is 3.0, we
5545          * should never actually excdeed 1.0 here.
5546          */
5547         WARN_ON(phase < -0x8000 || phase > 0x18000);
5548
5549         if (phase < 0)
5550                 phase = 0x10000 + phase;
5551         else
5552                 trip = PS_PHASE_TRIP;
5553
5554         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5555 }
5556
/*
 * Scaler source/destination size limits in pixels. ICL raised the
 * maximum width to 5120; planar (4:2:0) YUV sources additionally
 * need at least 16x16.
 */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
5571
/*
 * skl_update_scaler - stage a scaler allocation or release in @crtc_state
 * @crtc_state: crtc state whose scaler bookkeeping is updated
 * @force_detach: release the scaler regardless of need (e.g. plane disable)
 * @scaler_user: index identifying the user (plane index or SKL_CRTC_INDEX)
 * @scaler_id: in/out, currently assigned scaler id (-1 when none)
 * @src_w: source width
 * @src_h: source height
 * @dst_w: destination width
 * @dst_h: destination height
 * @format: source pixel format (NULL for pipe/panel-fitter scaling)
 * @need_scaler: caller has already determined a scaler is required
 *
 * Only updates the software scaler state; the actual register
 * programming happens later during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL when the requested scaling is
 * unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar (semiplanar) YUV sources have a larger minimum size. */
	if (format && drm_format_info_is_yuv_semiplanar(format) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
5660
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc state containing the scaler state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
	bool need_scaler = false;

	/* YCbCr 4:2:0 output always goes through a scaler. */
	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		need_scaler = true;

	/* Pipe scaling: source is the pipe src size, dest the active mode. */
	return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
				 &state->scaler_state.scaler_id,
				 state->pipe_src_w, state->pipe_src_h,
				 adjusted_mode->crtc_hdisplay,
				 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
}
5684
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state containing the scaler state to update
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	/* No fb or invisible plane: release any scaler held by this plane. */
	bool force_detach = !fb || !plane_state->uapi.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && drm_format_info_is_yuv_semiplanar(fb->format))
		need_scaler = true;

	/* Src coordinates are .16 fixed point, hence the >> 16. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->uapi.src) >> 16,
				drm_rect_height(&plane_state->uapi.src) >> 16,
				drm_rect_width(&plane_state->uapi.dst),
				drm_rect_height(&plane_state->uapi.dst),
				fb ? fb->format : NULL, need_scaler);

	/* Done if staging failed or no scaler ended up assigned. */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		/* FP16 scaling is only supported on gen11+. */
		if (INTEL_GEN(dev_priv) >= 11)
			break;
		/* fall through */
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
5772
5773 static void skylake_scaler_disable(struct intel_crtc *crtc)
5774 {
5775         int i;
5776
5777         for (i = 0; i < crtc->num_scalers; i++)
5778                 skl_detach_scaler(crtc, i);
5779 }
5780
/*
 * Program the SKL+ panel fitter, which is implemented on top of one
 * of the pipe scalers (the id was assigned during atomic check).
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* pfit requires a scaler; bail if none was assigned. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in bits 31:16, height in 15:0. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* .16 fixed point scale factors from pipe src to pfit window. */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
5817
/* Program the ILK-style panel fitter (PF_CTL and window registers). */
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (crtc_state->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			/* IVB/HSW additionally select the pipe in PF_CTL. */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
	}
}
5838
/*
 * Enable IPS for @crtc_state: via the pcode mailbox on Broadwell,
 * or by writing IPS_CTL directly otherwise. No-op when the state
 * doesn't have IPS enabled.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
5874
/*
 * Disable IPS for @crtc_state (pcode mailbox on Broadwell, IPS_CTL
 * otherwise) and wait a vblank so planes can be safely disabled
 * afterwards. No-op when the state doesn't have IPS enabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		/* Flush the write before waiting for the vblank. */
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5901
5902 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5903 {
5904         if (intel_crtc->overlay)
5905                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5906
5907         /* Let userspace switch the overlay on again. In most cases userspace
5908          * has to recompute where to put it anyway.
5909          */
5910 }
5911
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 * @new_crtc_state: the enabling state
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc,
			  const struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
5946
/* FIXME get rid of this and use pre_plane_update */
/*
 * Non-atomic counterpart of the pre-plane-update work needed before
 * disabling the primary plane: underrun reporting (gen2), IPS, and
 * self-refresh (cxsr) on GMCH platforms.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	hsw_disable_ips(to_intel_crtc_state(crtc->state));

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);
}
5978
5979 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5980                                        const struct intel_crtc_state *new_crtc_state)
5981 {
5982         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5983         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5984
5985         if (!old_crtc_state->ips_enabled)
5986                 return false;
5987
5988         if (needs_modeset(new_crtc_state))
5989                 return true;
5990
5991         /*
5992          * Workaround : Do not read or write the pipe palette/gamma data while
5993          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5994          *
5995          * Disable IPS before we program the LUT.
5996          */
5997         if (IS_HASWELL(dev_priv) &&
5998             (new_crtc_state->uapi.color_mgmt_changed ||
5999              new_crtc_state->update_pipe) &&
6000             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6001                 return true;
6002
6003         return !new_crtc_state->ips_enabled;
6004 }
6005
6006 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6007                                        const struct intel_crtc_state *new_crtc_state)
6008 {
6009         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6010         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6011
6012         if (!new_crtc_state->ips_enabled)
6013                 return false;
6014
6015         if (needs_modeset(new_crtc_state))
6016                 return true;
6017
6018         /*
6019          * Workaround : Do not read or write the pipe palette/gamma data while
6020          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6021          *
6022          * Re-enable IPS after the LUT has been programmed.
6023          */
6024         if (IS_HASWELL(dev_priv) &&
6025             (new_crtc_state->uapi.color_mgmt_changed ||
6026              new_crtc_state->update_pipe) &&
6027             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6028                 return true;
6029
6030         /*
6031          * We can't read out IPS on broadwell, assume the worst and
6032          * forcibly enable IPS on the first fastset.
6033          */
6034         if (new_crtc_state->update_pipe &&
6035             old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
6036                 return true;
6037
6038         return !old_crtc_state->ips_enabled;
6039 }
6040
6041 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
6042                           const struct intel_crtc_state *crtc_state)
6043 {
6044         if (!crtc_state->nv12_planes)
6045                 return false;
6046
6047         /* WA Display #0827: Gen9:all */
6048         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6049                 return true;
6050
6051         return false;
6052 }
6053
6054 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
6055                                const struct intel_crtc_state *crtc_state)
6056 {
6057         /* Wa_2006604312:icl */
6058         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
6059                 return true;
6060
6061         return false;
6062 }
6063
/*
 * Work done after plane updates have been committed: frontbuffer
 * flip notification, watermarks, IPS re-enable, FBC, and
 * deactivating display workarounds that are no longer needed.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	/* The new crtc state, looked up from the same atomic state. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	if (pipe_config->update_wm_post && pipe_config->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	if (old_primary_state) {
		struct drm_plane_state *new_primary_state =
			drm_atomic_get_new_plane_state(state, primary);

		intel_fbc_post_update(crtc);

		/* Primary just became visible (or was re-enabled by modeset). */
		if (new_primary_state->visible &&
		    (needs_modeset(pipe_config) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}

	/* Turn off WA #0827 once the old state needed it but the new doesn't. */
	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
	    !needs_nv12_wa(dev_priv, pipe_config))
		skl_wa_827(dev_priv, crtc->pipe, false);

	/* Likewise for Wa_2006604312 (scaler clock gating). */
	if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
	    !needs_scalerclk_wa(dev_priv, pipe_config))
		icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
}
6105
6106 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
6107                                    struct intel_crtc_state *pipe_config)
6108 {
6109         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6110         struct drm_device *dev = crtc->base.dev;
6111         struct drm_i915_private *dev_priv = to_i915(dev);
6112         struct drm_atomic_state *state = old_crtc_state->uapi.state;
6113         struct drm_plane *primary = crtc->base.primary;
6114         struct drm_plane_state *old_primary_state =
6115                 drm_atomic_get_old_plane_state(state, primary);
6116         bool modeset = needs_modeset(pipe_config);
6117         struct intel_atomic_state *intel_state =
6118                 to_intel_atomic_state(state);
6119
6120         if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
6121                 hsw_disable_ips(old_crtc_state);
6122
6123         if (old_primary_state) {
6124                 struct intel_plane_state *new_primary_state =
6125                         intel_atomic_get_new_plane_state(intel_state,
6126                                                          to_intel_plane(primary));
6127
6128                 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
6129                 /*
6130                  * Gen2 reports pipe underruns whenever all planes are disabled.
6131                  * So disable underrun reporting before all the planes get disabled.
6132                  */
6133                 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
6134                     (modeset || !new_primary_state->uapi.visible))
6135                         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
6136         }
6137
6138         /* Display WA 827 */
6139         if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
6140             needs_nv12_wa(dev_priv, pipe_config))
6141                 skl_wa_827(dev_priv, crtc->pipe, true);
6142
6143         /* Wa_2006604312:icl */
6144         if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
6145             needs_scalerclk_wa(dev_priv, pipe_config))
6146                 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
6147
6148         /*
6149          * Vblank time updates from the shadow to live plane control register
6150          * are blocked if the memory self-refresh mode is active at that
6151          * moment. So to make sure the plane gets truly disabled, disable
6152          * first the self-refresh mode. The self-refresh enable bit in turn
6153          * will be checked/applied by the HW only at the next frame start
6154          * event which is after the vblank start event, so we need to have a
6155          * wait-for-vblank between disabling the plane and the pipe.
6156          */
6157         if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
6158             pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
6159                 intel_wait_for_vblank(dev_priv, crtc->pipe);
6160
6161         /*
6162          * IVB workaround: must disable low power watermarks for at least
6163          * one frame before enabling scaling.  LP watermarks can be re-enabled
6164          * when scaling is disabled.
6165          *
6166          * WaCxSRDisabledForSpriteScaling:ivb
6167          */
6168         if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
6169             old_crtc_state->hw.active)
6170                 intel_wait_for_vblank(dev_priv, crtc->pipe);
6171
6172         /*
6173          * If we're doing a modeset, we're done.  No need to do any pre-vblank
6174          * watermark programming here.
6175          */
6176         if (needs_modeset(pipe_config))
6177                 return;
6178
6179         /*
6180          * For platforms that support atomic watermarks, program the
6181          * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
6182          * will be the intermediate values that are safe for both pre- and
6183          * post- vblank; when vblank happens, the 'active' values will be set
6184          * to the final 'target' values and we'll do this again to get the
6185          * optimal watermarks.  For gen9+ platforms, the values we program here
6186          * will be the final target values which will get automatically latched
6187          * at vblank time; no further programming will be necessary.
6188          *
6189          * If a platform hasn't been transitioned to atomic watermarks yet,
6190          * we'll continue to update watermarks the old way, if flags tell
6191          * us to.
6192          */
6193         if (dev_priv->display.initial_watermarks != NULL)
6194                 dev_priv->display.initial_watermarks(intel_state,
6195                                                      pipe_config);
6196         else if (pipe_config->update_wm_pre)
6197                 intel_update_watermarks(crtc);
6198 }
6199
6200 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6201                                       struct intel_crtc *crtc)
6202 {
6203         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6204         const struct intel_crtc_state *new_crtc_state =
6205                 intel_atomic_get_new_crtc_state(state, crtc);
6206         unsigned int update_mask = new_crtc_state->update_planes;
6207         const struct intel_plane_state *old_plane_state;
6208         struct intel_plane *plane;
6209         unsigned fb_bits = 0;
6210         int i;
6211
6212         intel_crtc_dpms_overlay_disable(crtc);
6213
6214         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6215                 if (crtc->pipe != plane->pipe ||
6216                     !(update_mask & BIT(plane->id)))
6217                         continue;
6218
6219                 intel_disable_plane(plane, new_crtc_state);
6220
6221                 if (old_plane_state->uapi.visible)
6222                         fb_bits |= plane->frontbuffer_bit;
6223         }
6224
6225         intel_frontbuffer_flip(dev_priv, fb_bits);
6226 }
6227
6228 /*
6229  * intel_connector_primary_encoder - get the primary encoder for a connector
6230  * @connector: connector for which to return the encoder
6231  *
6232  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6233  * all connectors to their encoder, except for DP-MST connectors which have
6234  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6235  * pointed to by as many DP-MST connectors as there are pipes.
6236  */
6237 static struct intel_encoder *
6238 intel_connector_primary_encoder(struct intel_connector *connector)
6239 {
6240         struct intel_encoder *encoder;
6241
6242         if (connector->mst_port)
6243                 return &dp_to_dig_port(connector->mst_port)->base;
6244
6245         encoder = intel_attached_encoder(&connector->base);
6246         WARN_ON(!encoder);
6247
6248         return encoder;
6249 }
6250
6251 static bool
6252 intel_connector_needs_modeset(struct intel_atomic_state *state,
6253                               const struct drm_connector_state *old_conn_state,
6254                               const struct drm_connector_state *new_conn_state)
6255 {
6256         struct intel_crtc *old_crtc = old_conn_state->crtc ?
6257                                       to_intel_crtc(old_conn_state->crtc) : NULL;
6258         struct intel_crtc *new_crtc = new_conn_state->crtc ?
6259                                       to_intel_crtc(new_conn_state->crtc) : NULL;
6260
6261         return new_crtc != old_crtc ||
6262                (new_crtc &&
6263                 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
6264 }
6265
6266 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6267 {
6268         struct drm_connector_state *old_conn_state;
6269         struct drm_connector_state *new_conn_state;
6270         struct drm_connector *conn;
6271         int i;
6272
6273         for_each_oldnew_connector_in_state(&state->base, conn,
6274                                            old_conn_state, new_conn_state, i) {
6275                 struct intel_encoder *encoder;
6276                 struct intel_crtc *crtc;
6277
6278                 if (!intel_connector_needs_modeset(state,
6279                                                    old_conn_state,
6280                                                    new_conn_state))
6281                         continue;
6282
6283                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6284                 if (!encoder->update_prepare)
6285                         continue;
6286
6287                 crtc = new_conn_state->crtc ?
6288                         to_intel_crtc(new_conn_state->crtc) : NULL;
6289                 encoder->update_prepare(state, encoder, crtc);
6290         }
6291 }
6292
6293 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6294 {
6295         struct drm_connector_state *old_conn_state;
6296         struct drm_connector_state *new_conn_state;
6297         struct drm_connector *conn;
6298         int i;
6299
6300         for_each_oldnew_connector_in_state(&state->base, conn,
6301                                            old_conn_state, new_conn_state, i) {
6302                 struct intel_encoder *encoder;
6303                 struct intel_crtc *crtc;
6304
6305                 if (!intel_connector_needs_modeset(state,
6306                                                    old_conn_state,
6307                                                    new_conn_state))
6308                         continue;
6309
6310                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6311                 if (!encoder->update_complete)
6312                         continue;
6313
6314                 crtc = new_conn_state->crtc ?
6315                         to_intel_crtc(new_conn_state->crtc) : NULL;
6316                 encoder->update_complete(state, encoder, crtc);
6317         }
6318 }
6319
6320 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6321                                           struct intel_crtc *crtc)
6322 {
6323         const struct intel_crtc_state *crtc_state =
6324                 intel_atomic_get_new_crtc_state(state, crtc);
6325         const struct drm_connector_state *conn_state;
6326         struct drm_connector *conn;
6327         int i;
6328
6329         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6330                 struct intel_encoder *encoder =
6331                         to_intel_encoder(conn_state->best_encoder);
6332
6333                 if (conn_state->crtc != &crtc->base)
6334                         continue;
6335
6336                 if (encoder->pre_pll_enable)
6337                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6338         }
6339 }
6340
6341 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6342                                       struct intel_crtc *crtc)
6343 {
6344         const struct intel_crtc_state *crtc_state =
6345                 intel_atomic_get_new_crtc_state(state, crtc);
6346         const struct drm_connector_state *conn_state;
6347         struct drm_connector *conn;
6348         int i;
6349
6350         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6351                 struct intel_encoder *encoder =
6352                         to_intel_encoder(conn_state->best_encoder);
6353
6354                 if (conn_state->crtc != &crtc->base)
6355                         continue;
6356
6357                 if (encoder->pre_enable)
6358                         encoder->pre_enable(encoder, crtc_state, conn_state);
6359         }
6360 }
6361
6362 static void intel_encoders_enable(struct intel_atomic_state *state,
6363                                   struct intel_crtc *crtc)
6364 {
6365         const struct intel_crtc_state *crtc_state =
6366                 intel_atomic_get_new_crtc_state(state, crtc);
6367         const struct drm_connector_state *conn_state;
6368         struct drm_connector *conn;
6369         int i;
6370
6371         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6372                 struct intel_encoder *encoder =
6373                         to_intel_encoder(conn_state->best_encoder);
6374
6375                 if (conn_state->crtc != &crtc->base)
6376                         continue;
6377
6378                 if (encoder->enable)
6379                         encoder->enable(encoder, crtc_state, conn_state);
6380                 intel_opregion_notify_encoder(encoder, true);
6381         }
6382 }
6383
6384 static void intel_encoders_disable(struct intel_atomic_state *state,
6385                                    struct intel_crtc *crtc)
6386 {
6387         const struct intel_crtc_state *old_crtc_state =
6388                 intel_atomic_get_old_crtc_state(state, crtc);
6389         const struct drm_connector_state *old_conn_state;
6390         struct drm_connector *conn;
6391         int i;
6392
6393         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6394                 struct intel_encoder *encoder =
6395                         to_intel_encoder(old_conn_state->best_encoder);
6396
6397                 if (old_conn_state->crtc != &crtc->base)
6398                         continue;
6399
6400                 intel_opregion_notify_encoder(encoder, false);
6401                 if (encoder->disable)
6402                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6403         }
6404 }
6405
6406 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6407                                         struct intel_crtc *crtc)
6408 {
6409         const struct intel_crtc_state *old_crtc_state =
6410                 intel_atomic_get_old_crtc_state(state, crtc);
6411         const struct drm_connector_state *old_conn_state;
6412         struct drm_connector *conn;
6413         int i;
6414
6415         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6416                 struct intel_encoder *encoder =
6417                         to_intel_encoder(old_conn_state->best_encoder);
6418
6419                 if (old_conn_state->crtc != &crtc->base)
6420                         continue;
6421
6422                 if (encoder->post_disable)
6423                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6424         }
6425 }
6426
6427 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6428                                             struct intel_crtc *crtc)
6429 {
6430         const struct intel_crtc_state *old_crtc_state =
6431                 intel_atomic_get_old_crtc_state(state, crtc);
6432         const struct drm_connector_state *old_conn_state;
6433         struct drm_connector *conn;
6434         int i;
6435
6436         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6437                 struct intel_encoder *encoder =
6438                         to_intel_encoder(old_conn_state->best_encoder);
6439
6440                 if (old_conn_state->crtc != &crtc->base)
6441                         continue;
6442
6443                 if (encoder->post_pll_disable)
6444                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6445         }
6446 }
6447
6448 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6449                                        struct intel_crtc *crtc)
6450 {
6451         const struct intel_crtc_state *crtc_state =
6452                 intel_atomic_get_new_crtc_state(state, crtc);
6453         const struct drm_connector_state *conn_state;
6454         struct drm_connector *conn;
6455         int i;
6456
6457         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6458                 struct intel_encoder *encoder =
6459                         to_intel_encoder(conn_state->best_encoder);
6460
6461                 if (conn_state->crtc != &crtc->base)
6462                         continue;
6463
6464                 if (encoder->update_pipe)
6465                         encoder->update_pipe(encoder, crtc_state, conn_state);
6466         }
6467 }
6468
6469 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6470 {
6471         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6472         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6473
6474         plane->disable_plane(plane, crtc_state);
6475 }
6476
/*
 * Full modeset enable sequence for ILK-style (PCH-based) pipes.
 *
 * Programs pipe timings, M/N values and pipeconf, runs the encoder
 * pre_enable hooks, brings up FDI/pfit/LUTs, enables the pipe and the
 * PCH side, and finally runs the encoder enable hooks. The step order
 * follows the hardware programming sequence and must not be reshuffled
 * casually.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->uapi.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /* Enabling an already-active crtc would wedge the sequence below. */
        if (WARN_ON(intel_crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (pipe_config->has_pch_encoder)
                intel_prepare_shared_dpll(pipe_config);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        intel_set_pipe_timings(pipe_config);
        intel_set_pipe_src_size(pipe_config);

        if (pipe_config->has_pch_encoder) {
                /* FDI link M/N values for the CPU transcoder. */
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        ironlake_set_pipeconf(pipe_config);

        intel_crtc->active = true;

        intel_encoders_pre_enable(state, intel_crtc);

        if (pipe_config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ironlake_fdi_pll_enable(pipe_config);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(pipe_config);

        /* Watermarks must be valid before the pipe starts pulling data. */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);
        intel_enable_pipe(pipe_config);

        if (pipe_config->has_pch_encoder)
                ironlake_pch_enable(state, pipe_config);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(state, intel_crtc);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev, intel_crtc->pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (pipe_config->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
        /* Re-arm the underrun reporting suppressed at the top. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6571
6572 /* IPS only exists on ULT machines and is tied to pipe A. */
6573 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6574 {
6575         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6576 }
6577
6578 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6579                                             enum pipe pipe, bool apply)
6580 {
6581         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6582         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6583
6584         if (apply)
6585                 val |= mask;
6586         else
6587                 val &= ~mask;
6588
6589         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6590 }
6591
6592 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6593 {
6594         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6595         enum pipe pipe = crtc->pipe;
6596         u32 val;
6597
6598         val = MBUS_DBOX_A_CREDIT(2);
6599
6600         if (INTEL_GEN(dev_priv) >= 12) {
6601                 val |= MBUS_DBOX_BW_CREDIT(2);
6602                 val |= MBUS_DBOX_B_CREDIT(12);
6603         } else {
6604                 val |= MBUS_DBOX_BW_CREDIT(1);
6605                 val |= MBUS_DBOX_B_CREDIT(8);
6606         }
6607
6608         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6609 }
6610
6611 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6612 {
6613         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6614         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6615         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6616         u32 val;
6617
6618         val = I915_READ(reg);
6619         val &= ~HSW_FRAME_START_DELAY_MASK;
6620         val |= HSW_FRAME_START_DELAY(0);
6621         I915_WRITE(reg, val);
6622 }
6623
/*
 * Full modeset enable sequence for HSW+ (DDI-based) pipes, including the
 * gen9+/gen11+/gen12 additions (port sync, MBUS credits, DSI/EDP special
 * cases). The step ordering follows the hardware programming sequence and
 * must not be reshuffled casually.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                                struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->uapi.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        bool psl_clkgate_wa;

        /* Enabling an already-active crtc would wedge the sequence below. */
        if (WARN_ON(intel_crtc->active))
                return;

        intel_encoders_pre_pll_enable(state, intel_crtc);

        if (pipe_config->shared_dpll)
                intel_enable_shared_dpll(pipe_config);

        intel_encoders_pre_enable(state, intel_crtc);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        /* DSI transcoders program their own timings in the encoder hooks. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_enable_trans_port_sync(pipe_config);

        intel_set_pipe_src_size(pipe_config);

        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder)) {
                I915_WRITE(PIPE_MULT(cpu_transcoder),
                           pipe_config->pixel_multiplier - 1);
        }

        if (pipe_config->has_pch_encoder) {
                /* FDI link M/N values for the CPU transcoder. */
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        if (!transcoder_is_dsi(cpu_transcoder)) {
                hsw_set_frame_start_delay(pipe_config);
                haswell_set_pipeconf(pipe_config);
        }

        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipemisc(pipe_config);

        intel_crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                         pipe_config->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (INTEL_GEN(dev_priv) >= 9)
                skylake_pfit_enable(pipe_config);
        else
                ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (INTEL_GEN(dev_priv) < 9)
                intel_disable_primary_plane(pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(intel_crtc);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(pipe_config);

        /* Watermarks must be valid before the pipe starts pulling data. */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_pipe_mbus_enable(intel_crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(pipe_config);

        if (pipe_config->has_pch_encoder)
                lpt_pch_enable(state, pipe_config);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(state, intel_crtc);

        /* WA #1180: undo the clock gating WA after one frame. */
        if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
        }
}
6734
6735 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6736 {
6737         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6738         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6739         enum pipe pipe = crtc->pipe;
6740
6741         /* To avoid upsetting the power well on haswell only disable the pfit if
6742          * it's in use. The hw state code will make sure we get this right. */
6743         if (old_crtc_state->pch_pfit.enabled) {
6744                 I915_WRITE(PF_CTL(pipe), 0);
6745                 I915_WRITE(PF_WIN_POS(pipe), 0);
6746                 I915_WRITE(PF_WIN_SZ(pipe), 0);
6747         }
6748 }
6749
/*
 * Full modeset disable sequence for ILK-style (PCH-based) pipes:
 * encoder disable hooks, vblank off, pipe off, pfit/FDI teardown,
 * encoder post_disable hooks, then PCH transcoder and FDI PLL teardown.
 * Step order mirrors the hardware programming sequence in reverse.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                  struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(state, intel_crtc);

        intel_crtc_vblank_off(intel_crtc);

        intel_disable_pipe(old_crtc_state);

        ironlake_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ironlake_fdi_disable(crtc);

        intel_encoders_post_disable(state, intel_crtc);

        if (old_crtc_state->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);

                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = I915_READ(reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        I915_WRITE(reg, temp);

                        /* disable DPLL_SEL */
                        temp = I915_READ(PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }

                ironlake_fdi_pll_disable(intel_crtc);
        }

        /* Re-arm the underrun reporting suppressed at the top. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6807
/*
 * Full modeset disable sequence for HSW+ (DDI-based) pipes: encoder
 * disable hooks, vblank off, pipe off, port sync / transcoder / DSC /
 * scaler teardown, then the encoder post_disable and post_pll_disable
 * hooks. Step order mirrors the hardware programming sequence in reverse.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

        intel_encoders_disable(state, intel_crtc);

        intel_crtc_vblank_off(intel_crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(old_crtc_state);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_disable_transcoder_port_sync(old_crtc_state);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(old_crtc_state);

        intel_dsc_disable(old_crtc_state);

        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
                ironlake_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(state, intel_crtc);

        intel_encoders_post_pll_disable(state, intel_crtc);
}
6841
6842 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6843 {
6844         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6845         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6846
6847         if (!crtc_state->gmch_pfit.control)
6848                 return;
6849
6850         /*
6851          * The panel fitter should only be adjusted whilst the pipe is disabled,
6852          * according to register description and PRM.
6853          */
6854         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6855         assert_pipe_disabled(dev_priv, crtc->pipe);
6856
6857         I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6858         I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6859
6860         /* Border color in case we don't scale up to the full screen. Black by
6861          * default, change to something else for debugging. */
6862         I915_WRITE(BCLRPAT(crtc->pipe), 0);
6863 }
6864
6865 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6866 {
6867         if (phy == PHY_NONE)
6868                 return false;
6869
6870         if (IS_ELKHARTLAKE(dev_priv))
6871                 return phy <= PHY_C;
6872
6873         if (INTEL_GEN(dev_priv) >= 11)
6874                 return phy <= PHY_B;
6875
6876         return false;
6877 }
6878
6879 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6880 {
6881         if (INTEL_GEN(dev_priv) >= 12)
6882                 return phy >= PHY_D && phy <= PHY_I;
6883
6884         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6885                 return phy >= PHY_C && phy <= PHY_F;
6886
6887         return false;
6888 }
6889
6890 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6891 {
6892         if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6893                 return PHY_A;
6894
6895         return (enum phy)port;
6896 }
6897
6898 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6899 {
6900         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6901                 return PORT_TC_NONE;
6902
6903         if (INTEL_GEN(dev_priv) >= 12)
6904                 return port - PORT_D;
6905
6906         return port - PORT_C;
6907 }
6908
/*
 * Map a DDI port to the display power domain covering its lanes.
 * Unknown ports warn via MISSING_CASE and fall back to
 * POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
        switch (port) {
        case PORT_A:
                return POWER_DOMAIN_PORT_DDI_A_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_LANES;
        case PORT_C:
                return POWER_DOMAIN_PORT_DDI_C_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_LANES;
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_LANES;
        case PORT_F:
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        case PORT_G:
                return POWER_DOMAIN_PORT_DDI_G_LANES;
        default:
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
}
6931
/*
 * Map a digital port's AUX channel to the power domain needed for AUX
 * transactions. A Type-C port in TBT-alt mode uses the dedicated
 * *_TBT domains; everything else uses the regular AUX domains.
 * Unknown channels are reported via MISSING_CASE() with a fallback.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
6977
6978 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6979 {
6980         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6981         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6982         struct drm_encoder *encoder;
6983         enum pipe pipe = crtc->pipe;
6984         u64 mask;
6985         enum transcoder transcoder = crtc_state->cpu_transcoder;
6986
6987         if (!crtc_state->hw.active)
6988                 return 0;
6989
6990         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6991         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6992         if (crtc_state->pch_pfit.enabled ||
6993             crtc_state->pch_pfit.force_thru)
6994                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6995
6996         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
6997                                   crtc_state->uapi.encoder_mask) {
6998                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6999
7000                 mask |= BIT_ULL(intel_encoder->power_domain);
7001         }
7002
7003         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7004                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7005
7006         if (crtc_state->shared_dpll)
7007                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
7008
7009         return mask;
7010 }
7011
7012 static u64
7013 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7014 {
7015         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7016         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7017         enum intel_display_power_domain domain;
7018         u64 domains, new_domains, old_domains;
7019
7020         old_domains = crtc->enabled_power_domains;
7021         crtc->enabled_power_domains = new_domains =
7022                 get_crtc_power_domains(crtc_state);
7023
7024         domains = new_domains & ~old_domains;
7025
7026         for_each_power_domain(domain, domains)
7027                 intel_display_power_get(dev_priv, domain);
7028
7029         return old_domains & ~new_domains;
7030 }
7031
/*
 * Release the power domain references in @domains, as returned by
 * modeset_get_crtc_power_domains().
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}
7040
/*
 * Full modeset enable sequence for a pipe on VLV/CHV. The ordering
 * below (timings -> pipeconf -> PLL -> pfit/color -> pipe -> vblank
 * -> encoders) is hardware-mandated; do not reorder.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* CHV pipe B: select legacy blending and a zeroed canvas color. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, intel_crtc);

	/* CHV and VLV have different PLL programming sequences. */
	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, pipe_config);
		chv_enable_pll(intel_crtc, pipe_config);
	} else {
		vlv_prepare_pll(intel_crtc, pipe_config);
		vlv_enable_pll(intel_crtc, pipe_config);
	}

	intel_encoders_pre_enable(state, intel_crtc);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	dev_priv->display.initial_watermarks(state, pipe_config);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(state, intel_crtc);
}
7097
7098 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7099 {
7100         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7101         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7102
7103         I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
7104         I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
7105 }
7106
/*
 * Full modeset enable sequence for a pipe on gen2-4 (non-VLV/CHV)
 * platforms. The step ordering is hardware-mandated; do not reorder.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, intel_crtc);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	/* Not all platforms provide an initial_watermarks() hook. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(state, intel_crtc);
}
7157
7158 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7159 {
7160         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7161         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7162
7163         if (!old_crtc_state->gmch_pfit.control)
7164                 return;
7165
7166         assert_pipe_disabled(dev_priv, crtc->pipe);
7167
7168         DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
7169                       I915_READ(PFIT_CONTROL));
7170         I915_WRITE(PFIT_CONTROL, 0);
7171 }
7172
/*
 * Full modeset disable sequence for a pipe on gen2-4/VLV/CHV. The
 * step ordering (encoders -> vblank -> pipe -> pfit -> PLL) is
 * hardware-mandated; do not reorder.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, intel_crtc);

	intel_crtc_vblank_off(intel_crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, intel_crtc);

	/* DSI PLLs are handled by the encoder hooks; skip them here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, intel_crtc);

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7220
/*
 * Force a crtc off outside of a normal atomic commit (used when
 * sanitizing inherited hardware state). Disables planes and the pipe,
 * then scrubs the crtc's software state, power domain references and
 * the bookkeeping derived from it (cdclk, voltage, bandwidth).
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off every plane still visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/* Build a temporary atomic state just to call crtc_disable(). */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(temp_crtc_state, to_intel_atomic_state(state));

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Clear the crtc's software state to match the now-off hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Drop all power domain references this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;

	bw_state->data_rate[intel_crtc->pipe] = 0;
	bw_state->num_active_planes[intel_crtc->pipe] = 0;
}
7299
7300 /*
7301  * turn all crtc's off, but do not adjust state
7302  * This has to be paired with a call to intel_modeset_setup_hw_state.
7303  */
7304 int intel_display_suspend(struct drm_device *dev)
7305 {
7306         struct drm_i915_private *dev_priv = to_i915(dev);
7307         struct drm_atomic_state *state;
7308         int ret;
7309
7310         state = drm_atomic_helper_suspend(dev);
7311         ret = PTR_ERR_OR_ZERO(state);
7312         if (ret)
7313                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7314         else
7315                 dev_priv->modeset_restore_state = state;
7316         return ret;
7317 }
7318
/* Generic encoder destroy hook: clean up the DRM core state and free
 * the containing intel_encoder allocation. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
7326
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* Connector is enabled in hardware: it must have a crtc and
		 * an encoder that agree with the atomic state. */
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors share an encoder; skip the 1:1 checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Connector is off in hardware: nothing should point at it. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7365
7366 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7367 {
7368         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7369                 return crtc_state->fdi_lanes;
7370
7371         return 0;
7372 }
7373
/*
 * Validate the requested FDI lane count for @pipe against the
 * platform's limits and the lanes consumed by the other pipes.
 * Returns 0 on success, -EINVAL for an impossible config, or the
 * error from fetching another pipe's state (e.g. -EDEADLK).
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW cap FDI at 2 lanes regardless of pipe. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B using >2 lanes borrows from pipe C's share. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is limited to 2 lanes... */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* ...and only when pipe B isn't already using more than 2. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7445
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config. If the
 * lane check fails with -EINVAL, the pipe bpp is reduced and the
 * computation retried; RETRY is returned so the caller can recompute
 * the config with the lowered bpp. -EDEADLK is propagated unchanged.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	/* Not enough bandwidth: drop bpp by one step (2 bits/component)
	 * and retry, down to a minimum of 6 bpc. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7494
7495 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7496 {
7497         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7498         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7499
7500         /* IPS only exists on ULT machines and is tied to pipe A. */
7501         if (!hsw_crtc_supports_ips(crtc))
7502                 return false;
7503
7504         if (!i915_modparams.enable_ips)
7505                 return false;
7506
7507         if (crtc_state->pipe_bpp > 24)
7508                 return false;
7509
7510         /*
7511          * We compare against max which means we must take
7512          * the increased cdclk requirement into account when
7513          * calculating the new cdclk.
7514          *
7515          * Should measure whether using a lower cdclk w/o IPS
7516          */
7517         if (IS_BROADWELL(dev_priv) &&
7518             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7519                 return false;
7520
7521         return true;
7522 }
7523
7524 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7525 {
7526         struct drm_i915_private *dev_priv =
7527                 to_i915(crtc_state->uapi.crtc->dev);
7528         struct intel_atomic_state *intel_state =
7529                 to_intel_atomic_state(crtc_state->uapi.state);
7530
7531         if (!hsw_crtc_state_ips_capable(crtc_state))
7532                 return false;
7533
7534         /*
7535          * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7536          * enabled and disabled dynamically based on package C states,
7537          * user space can't make reliable use of the CRCs, so let's just
7538          * completely disable it.
7539          */
7540         if (crtc_state->crc_enabled)
7541                 return false;
7542
7543         /* IPS should be fine as long as at least one plane is enabled. */
7544         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7545                 return false;
7546
7547         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7548         if (IS_BROADWELL(dev_priv) &&
7549             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7550                 return false;
7551
7552         return true;
7553 }
7554
7555 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7556 {
7557         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7558
7559         /* GDG double wide on either pipe, otherwise pipe A only */
7560         return INTEL_GEN(dev_priv) < 4 &&
7561                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7562 }
7563
7564 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7565 {
7566         u32 pixel_rate;
7567
7568         pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
7569
7570         /*
7571          * We only use IF-ID interlacing. If we ever use
7572          * PF-ID we'll need to adjust the pixel_rate here.
7573          */
7574
7575         if (pipe_config->pch_pfit.enabled) {
7576                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7577                 u32 pfit_size = pipe_config->pch_pfit.size;
7578
7579                 pipe_w = pipe_config->pipe_src_w;
7580                 pipe_h = pipe_config->pipe_src_h;
7581
7582                 pfit_w = (pfit_size >> 16) & 0xFFFF;
7583                 pfit_h = pfit_size & 0xFFFF;
7584                 if (pipe_w < pfit_w)
7585                         pipe_w = pfit_w;
7586                 if (pipe_h < pfit_h)
7587                         pipe_h = pfit_h;
7588
7589                 if (WARN_ON(!pfit_w || !pfit_h))
7590                         return pixel_rate;
7591
7592                 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7593                                      pfit_w * pfit_h);
7594         }
7595
7596         return pixel_rate;
7597 }
7598
7599 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7600 {
7601         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7602
7603         if (HAS_GMCH(dev_priv))
7604                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7605                 crtc_state->pixel_rate =
7606                         crtc_state->hw.adjusted_mode.crtc_clock;
7607         else
7608                 crtc_state->pixel_rate =
7609                         ilk_pipe_pixel_rate(crtc_state);
7610 }
7611
/*
 * Validate and adjust the crtc configuration: dotclock limits (with
 * double wide handling on pre-gen4), YCbCr/CTM exclusivity, odd
 * source width restrictions, a zero-hsync-front-porch workaround,
 * the derived pixel rate, and the FDI config for PCH encoders.
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
7685
7686 static void
7687 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7688 {
7689         while (*num > DATA_LINK_M_N_MASK ||
7690                *den > DATA_LINK_M_N_MASK) {
7691                 *num >>= 1;
7692                 *den >>= 1;
7693         }
7694 }
7695
7696 static void compute_m_n(unsigned int m, unsigned int n,
7697                         u32 *ret_m, u32 *ret_n,
7698                         bool constant_n)
7699 {
7700         /*
7701          * Several DP dongles in particular seem to be fussy about
7702          * too large link M/N values. Give N value as 0x8000 that
7703          * should be acceptable by specific devices. 0x8000 is the
7704          * specified fixed N value for asynchronous clock mode,
7705          * which the devices expect also in synchronous clock mode.
7706          */
7707         if (constant_n)
7708                 *ret_n = 0x8000;
7709         else
7710                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7711
7712         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7713         intel_reduce_m_n_ratio(ret_m, ret_n);
7714 }
7715
7716 void
7717 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7718                        int pixel_clock, int link_clock,
7719                        struct intel_link_m_n *m_n,
7720                        bool constant_n, bool fec_enable)
7721 {
7722         u32 data_clock = bits_per_pixel * pixel_clock;
7723
7724         if (fec_enable)
7725                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
7726
7727         m_n->tu = 64;
7728         compute_m_n(data_clock,
7729                     link_clock * nlanes * 8,
7730                     &m_n->gmch_m, &m_n->gmch_n,
7731                     constant_n);
7732
7733         compute_m_n(pixel_clock, link_clock,
7734                     &m_n->link_m, &m_n->link_n,
7735                     constant_n);
7736 }
7737
7738 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
7739 {
7740         /*
7741          * There may be no VBT; and if the BIOS enabled SSC we can
7742          * just keep using it to avoid unnecessary flicker.  Whereas if the
7743          * BIOS isn't using it, don't assume it will work even if the VBT
7744          * indicates as much.
7745          */
7746         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
7747                 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
7748                         DREF_SSC1_ENABLE;
7749
7750                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
7751                         DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
7752                                       enableddisabled(bios_lvds_use_ssc),
7753                                       enableddisabled(dev_priv->vbt.lvds_use_ssc));
7754                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
7755                 }
7756         }
7757 }
7758
7759 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7760 {
7761         if (i915_modparams.panel_use_ssc >= 0)
7762                 return i915_modparams.panel_use_ssc != 0;
7763         return dev_priv->vbt.lvds_use_ssc
7764                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7765 }
7766
7767 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7768 {
7769         return (1 << dpll->n) << 16 | dpll->m2;
7770 }
7771
7772 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7773 {
7774         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7775 }
7776
7777 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7778                                      struct intel_crtc_state *crtc_state,
7779                                      struct dpll *reduced_clock)
7780 {
7781         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7782         u32 fp, fp2 = 0;
7783
7784         if (IS_PINEVIEW(dev_priv)) {
7785                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7786                 if (reduced_clock)
7787                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7788         } else {
7789                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7790                 if (reduced_clock)
7791                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7792         }
7793
7794         crtc_state->dpll_hw_state.fp0 = fp;
7795
7796         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7797             reduced_clock) {
7798                 crtc_state->dpll_hw_state.fp1 = fp2;
7799         } else {
7800                 crtc_state->dpll_hw_state.fp1 = fp;
7801         }
7802 }
7803
/*
 * Recalibrate the PLL B opamp via DPIO sideband writes.
 *
 * NOTE(review): the magic DPIO values below come from the VLV PLL
 * programming sequence; their individual bit meanings are not
 * documented here.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the opamp field again after the calibration write above. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7832
/*
 * Program the PCH transcoder data/link M1/N1 registers for @crtc_state's
 * pipe from the supplied M/N divider values.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the DATA_M1 register with the data M value. */
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7845
7846 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7847                                  enum transcoder transcoder)
7848 {
7849         if (IS_HASWELL(dev_priv))
7850                 return transcoder == TRANSCODER_EDP;
7851
7852         /*
7853          * Strictly speaking some registers are available before
7854          * gen7, but we only support DRRS on gen7+
7855          */
7856         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7857 }
7858
/*
 * Program the CPU transcoder data/link M/N registers from @m_n, and the
 * optional M2/N2 (DRRS downclock) set from @m2_n2 where the hardware has
 * those registers.  Pre-gen5 uses the G4X per-pipe register layout.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* Pre-ILK: M/N registers are indexed by pipe, not transcoder. */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7892
/*
 * Program the DP link M/N values selected by @m_n into the hardware.
 * M1_N1 programs the primary dividers (plus M2/N2 for DRRS where
 * supported); M2_N2 programs the downclocked dividers into the M1/N1
 * registers on hardware without dedicated M2/N2 registers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n and ignores the
	 * divider selected above, so an M2_N2 request has no effect there --
	 * confirm this is intentional (presumably DRRS is never used with a
	 * PCH encoder).
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
7917
7918 static void vlv_compute_dpll(struct intel_crtc *crtc,
7919                              struct intel_crtc_state *pipe_config)
7920 {
7921         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7922                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7923         if (crtc->pipe != PIPE_A)
7924                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7925
7926         /* DPLL not used with DSI, but still need the rest set up */
7927         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7928                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7929                         DPLL_EXT_BUFFER_ENABLE_VLV;
7930
7931         pipe_config->dpll_hw_state.dpll_md =
7932                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7933 }
7934
7935 static void chv_compute_dpll(struct intel_crtc *crtc,
7936                              struct intel_crtc_state *pipe_config)
7937 {
7938         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7939                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7940         if (crtc->pipe != PIPE_A)
7941                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7942
7943         /* DPLL not used with DSI, but still need the rest set up */
7944         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7945                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7946
7947         pipe_config->dpll_hw_state.dpll_md =
7948                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7949 }
7950
/*
 * Program the VLV DPLL dividers and analog settings through the DPIO
 * sideband before the PLL is enabled.
 *
 * NOTE(review): the magic DPIO constants follow the eDP/HDMI DPIO vbios
 * notes sequence referenced below and are not individually documented.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers must be written once before calibration is enabled. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock: preserve the middle byte, set DP-specific bit for DP. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
8050
/*
 * Program the CHV DPLL dividers, loop filter and lock-detect settings
 * through the DPIO sideband before the PLL is enabled.  The loop filter
 * coefficients are selected from the target VCO frequency.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 carries a 22-bit fractional part below its integer bits. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* Coarse threshold only when no fractional part is in use. */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8155
8156 /**
8157  * vlv_force_pll_on - forcibly enable just the PLL
8158  * @dev_priv: i915 private structure
8159  * @pipe: pipe PLL to enable
8160  * @dpll: PLL configuration
8161  *
8162  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8163  * in cases where we need the PLL enabled even when @pipe is not going to
8164  * be enabled.
8165  */
8166 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8167                      const struct dpll *dpll)
8168 {
8169         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8170         struct intel_crtc_state *pipe_config;
8171
8172         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8173         if (!pipe_config)
8174                 return -ENOMEM;
8175
8176         pipe_config->uapi.crtc = &crtc->base;
8177         pipe_config->pixel_multiplier = 1;
8178         pipe_config->dpll = *dpll;
8179
8180         if (IS_CHERRYVIEW(dev_priv)) {
8181                 chv_compute_dpll(crtc, pipe_config);
8182                 chv_prepare_pll(crtc, pipe_config);
8183                 chv_enable_pll(crtc, pipe_config);
8184         } else {
8185                 vlv_compute_dpll(crtc, pipe_config);
8186                 vlv_prepare_pll(crtc, pipe_config);
8187                 vlv_enable_pll(crtc, pipe_config);
8188         }
8189
8190         kfree(pipe_config);
8191
8192         return 0;
8193 }
8194
8195 /**
8196  * vlv_force_pll_off - forcibly disable just the PLL
8197  * @dev_priv: i915 private structure
8198  * @pipe: pipe PLL to disable
8199  *
 * Disable the PLL for @pipe. To be used in cases where the PLL was
 * enabled via vlv_force_pll_on() without @pipe itself being enabled.
8202  */
8203 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8204 {
8205         if (IS_CHERRYVIEW(dev_priv))
8206                 chv_disable_pll(dev_priv, pipe);
8207         else
8208                 vlv_disable_pll(dev_priv, pipe);
8209 }
8210
8211 static void i9xx_compute_dpll(struct intel_crtc *crtc,
8212                               struct intel_crtc_state *crtc_state,
8213                               struct dpll *reduced_clock)
8214 {
8215         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8216         u32 dpll;
8217         struct dpll *clock = &crtc_state->dpll;
8218
8219         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8220
8221         dpll = DPLL_VGA_MODE_DIS;
8222
8223         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8224                 dpll |= DPLLB_MODE_LVDS;
8225         else
8226                 dpll |= DPLLB_MODE_DAC_SERIAL;
8227
8228         if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8229             IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8230                 dpll |= (crtc_state->pixel_multiplier - 1)
8231                         << SDVO_MULTIPLIER_SHIFT_HIRES;
8232         }
8233
8234         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8235             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8236                 dpll |= DPLL_SDVO_HIGH_SPEED;
8237
8238         if (intel_crtc_has_dp_encoder(crtc_state))
8239                 dpll |= DPLL_SDVO_HIGH_SPEED;
8240
8241         /* compute bitmask from p1 value */
8242         if (IS_PINEVIEW(dev_priv))
8243                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
8244         else {
8245                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8246                 if (IS_G4X(dev_priv) && reduced_clock)
8247                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8248         }
8249         switch (clock->p2) {
8250         case 5:
8251                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8252                 break;
8253         case 7:
8254                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8255                 break;
8256         case 10:
8257                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8258                 break;
8259         case 14:
8260                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8261                 break;
8262         }
8263         if (INTEL_GEN(dev_priv) >= 4)
8264                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
8265
8266         if (crtc_state->sdvo_tv_clock)
8267                 dpll |= PLL_REF_INPUT_TVCLKINBC;
8268         else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8269                  intel_panel_use_ssc(dev_priv))
8270                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8271         else
8272                 dpll |= PLL_REF_INPUT_DREFCLK;
8273
8274         dpll |= DPLL_VCO_ENABLE;
8275         crtc_state->dpll_hw_state.dpll = dpll;
8276
8277         if (INTEL_GEN(dev_priv) >= 4) {
8278                 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
8279                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8280                 crtc_state->dpll_hw_state.dpll_md = dpll_md;
8281         }
8282 }
8283
8284 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8285                               struct intel_crtc_state *crtc_state,
8286                               struct dpll *reduced_clock)
8287 {
8288         struct drm_device *dev = crtc->base.dev;
8289         struct drm_i915_private *dev_priv = to_i915(dev);
8290         u32 dpll;
8291         struct dpll *clock = &crtc_state->dpll;
8292
8293         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8294
8295         dpll = DPLL_VGA_MODE_DIS;
8296
8297         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8298                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8299         } else {
8300                 if (clock->p1 == 2)
8301                         dpll |= PLL_P1_DIVIDE_BY_TWO;
8302                 else
8303                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8304                 if (clock->p2 == 4)
8305                         dpll |= PLL_P2_DIVIDE_BY_4;
8306         }
8307
8308         /*
8309          * Bspec:
8310          * "[Almador Errata}: For the correct operation of the muxed DVO pins
8311          *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
8312          *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
8313          *  Enable) must be set to “1” in both the DPLL A Control Register
8314          *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
8315          *
8316          * For simplicity We simply keep both bits always enabled in
8317          * both DPLLS. The spec says we should disable the DVO 2X clock
8318          * when not needed, but this seems to work fine in practice.
8319          */
8320         if (IS_I830(dev_priv) ||
8321             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
8322                 dpll |= DPLL_DVO_2X_MODE;
8323
8324         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8325             intel_panel_use_ssc(dev_priv))
8326                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8327         else
8328                 dpll |= PLL_REF_INPUT_DREFCLK;
8329
8330         dpll |= DPLL_VCO_ENABLE;
8331         crtc_state->dpll_hw_state.dpll = dpll;
8332 }
8333
/*
 * Program the transcoder timing registers (H/V TOTAL, BLANK, SYNC and
 * VSYNCSHIFT) for @crtc_state from its adjusted mode.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a fixed half-line shift; otherwise derive it
		 * from the hsync position. */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
8395
/*
 * Program PIPESRC with the pipe source size for @crtc_state's pipe
 * (width in the high 16 bits, height in the low 16, both minus one).
 */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((crtc_state->pipe_src_w - 1) << 16) |
		   (crtc_state->pipe_src_h - 1));
}
8409
8410 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8411 {
8412         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8413         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8414
8415         if (IS_GEN(dev_priv, 2))
8416                 return false;
8417
8418         if (INTEL_GEN(dev_priv) >= 9 ||
8419             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8420                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8421         else
8422                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8423 }
8424
8425 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8426                                    struct intel_crtc_state *pipe_config)
8427 {
8428         struct drm_device *dev = crtc->base.dev;
8429         struct drm_i915_private *dev_priv = to_i915(dev);
8430         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8431         u32 tmp;
8432
8433         tmp = I915_READ(HTOTAL(cpu_transcoder));
8434         pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8435         pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8436
8437         if (!transcoder_is_dsi(cpu_transcoder)) {
8438                 tmp = I915_READ(HBLANK(cpu_transcoder));
8439                 pipe_config->hw.adjusted_mode.crtc_hblank_start =
8440                                                         (tmp & 0xffff) + 1;
8441                 pipe_config->hw.adjusted_mode.crtc_hblank_end =
8442                                                 ((tmp >> 16) & 0xffff) + 1;
8443         }
8444         tmp = I915_READ(HSYNC(cpu_transcoder));
8445         pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8446         pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8447
8448         tmp = I915_READ(VTOTAL(cpu_transcoder));
8449         pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8450         pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8451
8452         if (!transcoder_is_dsi(cpu_transcoder)) {
8453                 tmp = I915_READ(VBLANK(cpu_transcoder));
8454                 pipe_config->hw.adjusted_mode.crtc_vblank_start =
8455                                                         (tmp & 0xffff) + 1;
8456                 pipe_config->hw.adjusted_mode.crtc_vblank_end =
8457                                                 ((tmp >> 16) & 0xffff) + 1;
8458         }
8459         tmp = I915_READ(VSYNC(cpu_transcoder));
8460         pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8461         pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8462
8463         if (intel_pipe_is_interlaced(pipe_config)) {
8464                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8465                 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
8466                 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
8467         }
8468 }
8469
8470 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8471                                     struct intel_crtc_state *pipe_config)
8472 {
8473         struct drm_device *dev = crtc->base.dev;
8474         struct drm_i915_private *dev_priv = to_i915(dev);
8475         u32 tmp;
8476
8477         tmp = I915_READ(PIPESRC(crtc->pipe));
8478         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8479         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8480
8481         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8482         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8483 }
8484
8485 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8486                                  struct intel_crtc_state *pipe_config)
8487 {
8488         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8489         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8490         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8491         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8492
8493         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8494         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8495         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8496         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8497
8498         mode->flags = pipe_config->hw.adjusted_mode.flags;
8499         mode->type = DRM_MODE_TYPE_DRIVER;
8500
8501         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8502
8503         mode->hsync = drm_mode_hsync(mode);
8504         mode->vrefresh = drm_mode_vrefresh(mode);
8505         drm_mode_set_name(mode);
8506 }
8507
8508 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8509 {
8510         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8511         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8512         u32 pipeconf;
8513
8514         pipeconf = 0;
8515
8516         /* we keep both pipes enabled on 830 */
8517         if (IS_I830(dev_priv))
8518                 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8519
8520         if (crtc_state->double_wide)
8521                 pipeconf |= PIPECONF_DOUBLE_WIDE;
8522
8523         /* only g4x and later have fancy bpc/dither controls */
8524         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8525             IS_CHERRYVIEW(dev_priv)) {
8526                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
8527                 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8528                         pipeconf |= PIPECONF_DITHER_EN |
8529                                     PIPECONF_DITHER_TYPE_SP;
8530
8531                 switch (crtc_state->pipe_bpp) {
8532                 case 18:
8533                         pipeconf |= PIPECONF_6BPC;
8534                         break;
8535                 case 24:
8536                         pipeconf |= PIPECONF_8BPC;
8537                         break;
8538                 case 30:
8539                         pipeconf |= PIPECONF_10BPC;
8540                         break;
8541                 default:
8542                         /* Case prevented by intel_choose_pipe_bpp_dither. */
8543                         BUG();
8544                 }
8545         }
8546
8547         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8548                 if (INTEL_GEN(dev_priv) < 4 ||
8549                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8550                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8551                 else
8552                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8553         } else {
8554                 pipeconf |= PIPECONF_PROGRESSIVE;
8555         }
8556
8557         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8558              crtc_state->limited_color_range)
8559                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8560
8561         pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8562
8563         pipeconf |= PIPECONF_FRAME_START_DELAY(0);
8564
8565         I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8566         POSTING_READ(PIPECONF(crtc->pipe));
8567 }
8568
8569 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8570                                    struct intel_crtc_state *crtc_state)
8571 {
8572         struct drm_device *dev = crtc->base.dev;
8573         struct drm_i915_private *dev_priv = to_i915(dev);
8574         const struct intel_limit *limit;
8575         int refclk = 48000;
8576
8577         memset(&crtc_state->dpll_hw_state, 0,
8578                sizeof(crtc_state->dpll_hw_state));
8579
8580         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8581                 if (intel_panel_use_ssc(dev_priv)) {
8582                         refclk = dev_priv->vbt.lvds_ssc_freq;
8583                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8584                 }
8585
8586                 limit = &intel_limits_i8xx_lvds;
8587         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8588                 limit = &intel_limits_i8xx_dvo;
8589         } else {
8590                 limit = &intel_limits_i8xx_dac;
8591         }
8592
8593         if (!crtc_state->clock_set &&
8594             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8595                                  refclk, NULL, &crtc_state->dpll)) {
8596                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8597                 return -EINVAL;
8598         }
8599
8600         i8xx_compute_dpll(crtc, crtc_state, NULL);
8601
8602         return 0;
8603 }
8604
8605 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8606                                   struct intel_crtc_state *crtc_state)
8607 {
8608         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8609         const struct intel_limit *limit;
8610         int refclk = 96000;
8611
8612         memset(&crtc_state->dpll_hw_state, 0,
8613                sizeof(crtc_state->dpll_hw_state));
8614
8615         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8616                 if (intel_panel_use_ssc(dev_priv)) {
8617                         refclk = dev_priv->vbt.lvds_ssc_freq;
8618                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8619                 }
8620
8621                 if (intel_is_dual_link_lvds(dev_priv))
8622                         limit = &intel_limits_g4x_dual_channel_lvds;
8623                 else
8624                         limit = &intel_limits_g4x_single_channel_lvds;
8625         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8626                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8627                 limit = &intel_limits_g4x_hdmi;
8628         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8629                 limit = &intel_limits_g4x_sdvo;
8630         } else {
8631                 /* The option is for other outputs */
8632                 limit = &intel_limits_i9xx_sdvo;
8633         }
8634
8635         if (!crtc_state->clock_set &&
8636             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8637                                 refclk, NULL, &crtc_state->dpll)) {
8638                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8639                 return -EINVAL;
8640         }
8641
8642         i9xx_compute_dpll(crtc, crtc_state, NULL);
8643
8644         return 0;
8645 }
8646
8647 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8648                                   struct intel_crtc_state *crtc_state)
8649 {
8650         struct drm_device *dev = crtc->base.dev;
8651         struct drm_i915_private *dev_priv = to_i915(dev);
8652         const struct intel_limit *limit;
8653         int refclk = 96000;
8654
8655         memset(&crtc_state->dpll_hw_state, 0,
8656                sizeof(crtc_state->dpll_hw_state));
8657
8658         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8659                 if (intel_panel_use_ssc(dev_priv)) {
8660                         refclk = dev_priv->vbt.lvds_ssc_freq;
8661                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8662                 }
8663
8664                 limit = &intel_limits_pineview_lvds;
8665         } else {
8666                 limit = &intel_limits_pineview_sdvo;
8667         }
8668
8669         if (!crtc_state->clock_set &&
8670             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8671                                 refclk, NULL, &crtc_state->dpll)) {
8672                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8673                 return -EINVAL;
8674         }
8675
8676         i9xx_compute_dpll(crtc, crtc_state, NULL);
8677
8678         return 0;
8679 }
8680
8681 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8682                                    struct intel_crtc_state *crtc_state)
8683 {
8684         struct drm_device *dev = crtc->base.dev;
8685         struct drm_i915_private *dev_priv = to_i915(dev);
8686         const struct intel_limit *limit;
8687         int refclk = 96000;
8688
8689         memset(&crtc_state->dpll_hw_state, 0,
8690                sizeof(crtc_state->dpll_hw_state));
8691
8692         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8693                 if (intel_panel_use_ssc(dev_priv)) {
8694                         refclk = dev_priv->vbt.lvds_ssc_freq;
8695                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8696                 }
8697
8698                 limit = &intel_limits_i9xx_lvds;
8699         } else {
8700                 limit = &intel_limits_i9xx_sdvo;
8701         }
8702
8703         if (!crtc_state->clock_set &&
8704             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8705                                  refclk, NULL, &crtc_state->dpll)) {
8706                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8707                 return -EINVAL;
8708         }
8709
8710         i9xx_compute_dpll(crtc, crtc_state, NULL);
8711
8712         return 0;
8713 }
8714
8715 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8716                                   struct intel_crtc_state *crtc_state)
8717 {
8718         int refclk = 100000;
8719         const struct intel_limit *limit = &intel_limits_chv;
8720
8721         memset(&crtc_state->dpll_hw_state, 0,
8722                sizeof(crtc_state->dpll_hw_state));
8723
8724         if (!crtc_state->clock_set &&
8725             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8726                                 refclk, NULL, &crtc_state->dpll)) {
8727                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8728                 return -EINVAL;
8729         }
8730
8731         chv_compute_dpll(crtc, crtc_state);
8732
8733         return 0;
8734 }
8735
8736 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8737                                   struct intel_crtc_state *crtc_state)
8738 {
8739         int refclk = 100000;
8740         const struct intel_limit *limit = &intel_limits_vlv;
8741
8742         memset(&crtc_state->dpll_hw_state, 0,
8743                sizeof(crtc_state->dpll_hw_state));
8744
8745         if (!crtc_state->clock_set &&
8746             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8747                                 refclk, NULL, &crtc_state->dpll)) {
8748                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8749                 return -EINVAL;
8750         }
8751
8752         vlv_compute_dpll(crtc, crtc_state);
8753
8754         return 0;
8755 }
8756
8757 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8758 {
8759         if (IS_I830(dev_priv))
8760                 return false;
8761
8762         return INTEL_GEN(dev_priv) >= 4 ||
8763                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8764 }
8765
8766 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8767                                  struct intel_crtc_state *pipe_config)
8768 {
8769         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8770         u32 tmp;
8771
8772         if (!i9xx_has_pfit(dev_priv))
8773                 return;
8774
8775         tmp = I915_READ(PFIT_CONTROL);
8776         if (!(tmp & PFIT_ENABLE))
8777                 return;
8778
8779         /* Check whether the pfit is attached to our pipe. */
8780         if (INTEL_GEN(dev_priv) < 4) {
8781                 if (crtc->pipe != PIPE_B)
8782                         return;
8783         } else {
8784                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8785                         return;
8786         }
8787
8788         pipe_config->gmch_pfit.control = tmp;
8789         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8790 }
8791
8792 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8793                                struct intel_crtc_state *pipe_config)
8794 {
8795         struct drm_device *dev = crtc->base.dev;
8796         struct drm_i915_private *dev_priv = to_i915(dev);
8797         enum pipe pipe = crtc->pipe;
8798         struct dpll clock;
8799         u32 mdiv;
8800         int refclk = 100000;
8801
8802         /* In case of DSI, DPLL will not be used */
8803         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8804                 return;
8805
8806         vlv_dpio_get(dev_priv);
8807         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8808         vlv_dpio_put(dev_priv);
8809
8810         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8811         clock.m2 = mdiv & DPIO_M2DIV_MASK;
8812         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8813         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8814         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8815
8816         pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8817 }
8818
/*
 * Read back the framebuffer currently programmed on the primary plane
 * (presumably left there by firmware — confirm with callers) so it can
 * be reused as the initial plane config. Allocates an intel_framebuffer
 * which is handed to the caller via plane_config->fb.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane is currently disabled */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Tiling and rotation bits only exist in DSPCNTR on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally supports horizontal mirroring */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* The surface base/offset register layout depends on the generation */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* fb size is taken from the pipe source size; registers hold size - 1 */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	/* Total size estimate for reserving the firmware fb memory */
	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8901
/*
 * Read back the CHV PLL dividers over the DPIO sideband and compute
 * the resulting port clock into pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Snapshot all divider registers under one sideband acquisition */
	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	/* Decode the divider fields; m2 has an optional 22-bit fractional part */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8935
8936 static enum intel_output_format
8937 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
8938 {
8939         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8940         u32 tmp;
8941
8942         tmp = I915_READ(PIPEMISC(crtc->pipe));
8943
8944         if (tmp & PIPEMISC_YUV420_ENABLE) {
8945                 /* We support 4:2:0 in full blend mode only */
8946                 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
8947
8948                 return INTEL_OUTPUT_FORMAT_YCBCR420;
8949         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8950                 return INTEL_OUTPUT_FORMAT_YCBCR444;
8951         } else {
8952                 return INTEL_OUTPUT_FORMAT_RGB;
8953         }
8954 }
8955
8956 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8957 {
8958         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8959         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8960         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8961         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8962         u32 tmp;
8963
8964         tmp = I915_READ(DSPCNTR(i9xx_plane));
8965
8966         if (tmp & DISPPLANE_GAMMA_ENABLE)
8967                 crtc_state->gamma_enable = true;
8968
8969         if (!HAS_GMCH(dev_priv) &&
8970             tmp & DISPPLANE_PIPE_CSC_ENABLE)
8971                 crtc_state->csc_enable = true;
8972 }
8973
/*
 * Read out the full hardware state of the pipe into @pipe_config.
 *
 * Returns true if the pipe is enabled and the readout succeeded, false
 * if the pipe (or its power domain) is off.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* All register reads below require the pipe power well to be on */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;
	pipe_config->master_transcoder = INVALID_TRANSCODER;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x/vlv/chv expose the pipe bpc in PIPECONF */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Where the pixel multiplier is read from depends on the generation */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Recover the port clock from the platform-specific PLL readout */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
9092
9093 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
9094 {
9095         struct intel_encoder *encoder;
9096         int i;
9097         u32 val, final;
9098         bool has_lvds = false;
9099         bool has_cpu_edp = false;
9100         bool has_panel = false;
9101         bool has_ck505 = false;
9102         bool can_ssc = false;
9103         bool using_ssc_source = false;
9104
9105         /* We need to take the global config into account */
9106         for_each_intel_encoder(&dev_priv->drm, encoder) {
9107                 switch (encoder->type) {
9108                 case INTEL_OUTPUT_LVDS:
9109                         has_panel = true;
9110                         has_lvds = true;
9111                         break;
9112                 case INTEL_OUTPUT_EDP:
9113                         has_panel = true;
9114                         if (encoder->port == PORT_A)
9115                                 has_cpu_edp = true;
9116                         break;
9117                 default:
9118                         break;
9119                 }
9120         }
9121
9122         if (HAS_PCH_IBX(dev_priv)) {
9123                 has_ck505 = dev_priv->vbt.display_clock_mode;
9124                 can_ssc = has_ck505;
9125         } else {
9126                 has_ck505 = false;
9127                 can_ssc = true;
9128         }
9129
9130         /* Check if any DPLLs are using the SSC source */
9131         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9132                 u32 temp = I915_READ(PCH_DPLL(i));
9133
9134                 if (!(temp & DPLL_VCO_ENABLE))
9135                         continue;
9136
9137                 if ((temp & PLL_REF_INPUT_MASK) ==
9138                     PLLB_REF_INPUT_SPREADSPECTRUMIN) {
9139                         using_ssc_source = true;
9140                         break;
9141                 }
9142         }
9143
9144         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
9145                       has_panel, has_lvds, has_ck505, using_ssc_source);
9146
9147         /* Ironlake: try to setup display ref clock before DPLL
9148          * enabling. This is only under driver's control after
9149          * PCH B stepping, previous chipset stepping should be
9150          * ignoring this setting.
9151          */
9152         val = I915_READ(PCH_DREF_CONTROL);
9153
9154         /* As we must carefully and slowly disable/enable each source in turn,
9155          * compute the final state we want first and check if we need to
9156          * make any changes at all.
9157          */
9158         final = val;
9159         final &= ~DREF_NONSPREAD_SOURCE_MASK;
9160         if (has_ck505)
9161                 final |= DREF_NONSPREAD_CK505_ENABLE;
9162         else
9163                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
9164
9165         final &= ~DREF_SSC_SOURCE_MASK;
9166         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9167         final &= ~DREF_SSC1_ENABLE;
9168
9169         if (has_panel) {
9170                 final |= DREF_SSC_SOURCE_ENABLE;
9171
9172                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
9173                         final |= DREF_SSC1_ENABLE;
9174
9175                 if (has_cpu_edp) {
9176                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
9177                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9178                         else
9179                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9180                 } else
9181                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9182         } else if (using_ssc_source) {
9183                 final |= DREF_SSC_SOURCE_ENABLE;
9184                 final |= DREF_SSC1_ENABLE;
9185         }
9186
9187         if (final == val)
9188                 return;
9189
9190         /* Always enable nonspread source */
9191         val &= ~DREF_NONSPREAD_SOURCE_MASK;
9192
9193         if (has_ck505)
9194                 val |= DREF_NONSPREAD_CK505_ENABLE;
9195         else
9196                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
9197
9198         if (has_panel) {
9199                 val &= ~DREF_SSC_SOURCE_MASK;
9200                 val |= DREF_SSC_SOURCE_ENABLE;
9201
9202                 /* SSC must be turned on before enabling the CPU output  */
9203                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9204                         DRM_DEBUG_KMS("Using SSC on panel\n");
9205                         val |= DREF_SSC1_ENABLE;
9206                 } else
9207                         val &= ~DREF_SSC1_ENABLE;
9208
9209                 /* Get SSC going before enabling the outputs */
9210                 I915_WRITE(PCH_DREF_CONTROL, val);
9211                 POSTING_READ(PCH_DREF_CONTROL);
9212                 udelay(200);
9213
9214                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9215
9216                 /* Enable CPU source on CPU attached eDP */
9217                 if (has_cpu_edp) {
9218                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9219                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
9220                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9221                         } else
9222                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9223                 } else
9224                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9225
9226                 I915_WRITE(PCH_DREF_CONTROL, val);
9227                 POSTING_READ(PCH_DREF_CONTROL);
9228                 udelay(200);
9229         } else {
9230                 DRM_DEBUG_KMS("Disabling CPU source output\n");
9231
9232                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9233
9234                 /* Turn off CPU output */
9235                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9236
9237                 I915_WRITE(PCH_DREF_CONTROL, val);
9238                 POSTING_READ(PCH_DREF_CONTROL);
9239                 udelay(200);
9240
9241                 if (!using_ssc_source) {
9242                         DRM_DEBUG_KMS("Disabling SSC source\n");
9243
9244                         /* Turn off the SSC source */
9245                         val &= ~DREF_SSC_SOURCE_MASK;
9246                         val |= DREF_SSC_SOURCE_DISABLE;
9247
9248                         /* Turn off SSC1 */
9249                         val &= ~DREF_SSC1_ENABLE;
9250
9251                         I915_WRITE(PCH_DREF_CONTROL, val);
9252                         POSTING_READ(PCH_DREF_CONTROL);
9253                         udelay(200);
9254                 }
9255         }
9256
9257         BUG_ON(val != final);
9258 }
9259
9260 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
9261 {
9262         u32 tmp;
9263
9264         tmp = I915_READ(SOUTH_CHICKEN2);
9265         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
9266         I915_WRITE(SOUTH_CHICKEN2, tmp);
9267
9268         if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
9269                         FDI_MPHY_IOSFSB_RESET_STATUS, 100))
9270                 DRM_ERROR("FDI mPHY reset assert timeout\n");
9271
9272         tmp = I915_READ(SOUTH_CHICKEN2);
9273         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
9274         I915_WRITE(SOUTH_CHICKEN2, tmp);
9275
9276         if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
9277                          FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
9278                 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
9279 }
9280
/*
 * WaMPhyProgramming:hsw
 *
 * Program the FDI mPHY tuning registers over the SBI_MPHY sideband.
 * The register addresses come in 0x20xx/0x21xx pairs, presumably one
 * per mPHY channel/lane pair — TODO confirm against the workaround
 * documentation. The values are taken verbatim from the workaround;
 * their individual meanings are not documented here.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	/* 0x8008: replace bits 31:24 with 0x12. */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	/* 0x2008/0x2108: set bit 11 on both channels. */
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	/* 0x206C/0x216C: set bits 24, 21 and 18. */
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	/* 0x2080/0x2180: replace the 3-bit field at 15:13 with 5. */
	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	/* 0x208C/0x218C: replace the low byte with 0x1C. */
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	/* 0x2098/0x2198: replace the byte at 23:16 with 0x1C. */
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	/* 0x20C4/0x21C4: set bit 27. */
	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	/* 0x20EC/0x21EC: replace the nibble at 31:28 with 4. */
	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9355
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * All iCLK accesses go through the SBI sideband and are serialized by
 * sb_lock. The ordering of the writes below follows the BSpec sequence
 * and must not be changed.
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize inconsistent caller requests rather than programming
	 * an invalid combination: FDI implies spread, and LP PCH
	 * (Wildcat/LynxPoint-LP) has no FDI at all. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC unit while keeping PATHALT set — presumably
	 * this holds the clock output on its alternate path while the
	 * SSC spins up (per the BSpec sequence). */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	/* Mandated settle delay from the BSpec sequence. */
	udelay(24);

	if (with_spread) {
		/* Release PATHALT to switch to the spread clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		/* FDI mPHY must be reset and reprogrammed only after the
		 * spread clock is up. */
		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Finally ungate the CLKOUT_DP buffer. The buffer-enable lives in
	 * a different SBI register on LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9400
/* Sequence to disable CLKOUT_DP.
 *
 * Reverse of lpt_enable_clkout_dp(): gate the output buffer first, then
 * park the clock on PATHALT before setting the SSC disable bit. Ordering
 * follows the BSpec disable sequence and must be preserved.
 */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Gate the CLKOUT_DP buffer (register differs on LP PCH). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	/* Nothing to do if the SSC unit is already disabled. */
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Switch to the alternate path first, with a settle
		 * delay, before disabling the SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9426
/*
 * Map a clock-bend step count (-50..50, in steps of 5) to an index into
 * sscdivintphase[]: BEND_IDX(-50) == 0 .. BEND_IDX(50) == 20.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * Per-bend values for the low 16 bits of SBI_SSCDIVINTPHASE (see
 * lpt_bend_clkout_dp()). Adjacent step pairs share a value; the raw
 * numbers come from the bend programming tables and are not derived
 * here.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9452
9453 /*
9454  * Bend CLKOUT_DP
9455  * steps -50 to 50 inclusive, in steps of 5
9456  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9457  * change in clock period = -(steps / 10) * 5.787 ps
9458  */
9459 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9460 {
9461         u32 tmp;
9462         int idx = BEND_IDX(steps);
9463
9464         if (WARN_ON(steps % 5 != 0))
9465                 return;
9466
9467         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9468                 return;
9469
9470         mutex_lock(&dev_priv->sb_lock);
9471
9472         if (steps % 10 != 0)
9473                 tmp = 0xAAAAAAAB;
9474         else
9475                 tmp = 0x00000000;
9476         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9477
9478         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9479         tmp &= 0xffff0000;
9480         tmp |= sscdivintphase[idx];
9481         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9482
9483         mutex_unlock(&dev_priv->sb_lock);
9484 }
9485
9486 #undef BEND_IDX
9487
9488 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9489 {
9490         u32 fuse_strap = I915_READ(FUSE_STRAP);
9491         u32 ctl = I915_READ(SPLL_CTL);
9492
9493         if ((ctl & SPLL_PLL_ENABLE) == 0)
9494                 return false;
9495
9496         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9497             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9498                 return true;
9499
9500         if (IS_BROADWELL(dev_priv) &&
9501             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9502                 return true;
9503
9504         return false;
9505 }
9506
9507 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9508                                enum intel_dpll_id id)
9509 {
9510         u32 fuse_strap = I915_READ(FUSE_STRAP);
9511         u32 ctl = I915_READ(WRPLL_CTL(id));
9512
9513         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9514                 return false;
9515
9516         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9517                 return true;
9518
9519         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9520             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9521             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9522                 return true;
9523
9524         return false;
9525 }
9526
9527 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9528 {
9529         struct intel_encoder *encoder;
9530         bool has_fdi = false;
9531
9532         for_each_intel_encoder(&dev_priv->drm, encoder) {
9533                 switch (encoder->type) {
9534                 case INTEL_OUTPUT_ANALOG:
9535                         has_fdi = true;
9536                         break;
9537                 default:
9538                         break;
9539                 }
9540         }
9541
9542         /*
9543          * The BIOS may have decided to use the PCH SSC
9544          * reference so we must not disable it until the
9545          * relevant PLLs have stopped relying on it. We'll
9546          * just leave the PCH SSC reference enabled in case
9547          * any active PLL is using it. It will get disabled
9548          * after runtime suspend if we don't have FDI.
9549          *
9550          * TODO: Move the whole reference clock handling
9551          * to the modeset sequence proper so that we can
9552          * actually enable/disable/reconfigure these things
9553          * safely. To do that we need to introduce a real
9554          * clock hierarchy. That would also allow us to do
9555          * clock bending finally.
9556          */
9557         dev_priv->pch_ssc_use = 0;
9558
9559         if (spll_uses_pch_ssc(dev_priv)) {
9560                 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9561                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9562         }
9563
9564         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9565                 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9566                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9567         }
9568
9569         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9570                 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9571                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9572         }
9573
9574         if (dev_priv->pch_ssc_use)
9575                 return;
9576
9577         if (has_fdi) {
9578                 lpt_bend_clkout_dp(dev_priv, 0);
9579                 lpt_enable_clkout_dp(dev_priv, true, true);
9580         } else {
9581                 lpt_disable_clkout_dp(dev_priv);
9582         }
9583 }
9584
9585 /*
9586  * Initialize reference clocks when the driver loads
9587  */
9588 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9589 {
9590         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9591                 ironlake_init_pch_refclk(dev_priv);
9592         else if (HAS_PCH_LPT(dev_priv))
9593                 lpt_init_pch_refclk(dev_priv);
9594 }
9595
9596 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9597 {
9598         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9599         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9600         enum pipe pipe = crtc->pipe;
9601         u32 val;
9602
9603         val = 0;
9604
9605         switch (crtc_state->pipe_bpp) {
9606         case 18:
9607                 val |= PIPECONF_6BPC;
9608                 break;
9609         case 24:
9610                 val |= PIPECONF_8BPC;
9611                 break;
9612         case 30:
9613                 val |= PIPECONF_10BPC;
9614                 break;
9615         case 36:
9616                 val |= PIPECONF_12BPC;
9617                 break;
9618         default:
9619                 /* Case prevented by intel_choose_pipe_bpp_dither. */
9620                 BUG();
9621         }
9622
9623         if (crtc_state->dither)
9624                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9625
9626         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9627                 val |= PIPECONF_INTERLACED_ILK;
9628         else
9629                 val |= PIPECONF_PROGRESSIVE;
9630
9631         /*
9632          * This would end up with an odd purple hue over
9633          * the entire display. Make sure we don't do it.
9634          */
9635         WARN_ON(crtc_state->limited_color_range &&
9636                 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
9637
9638         if (crtc_state->limited_color_range)
9639                 val |= PIPECONF_COLOR_RANGE_SELECT;
9640
9641         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9642                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
9643
9644         val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9645
9646         val |= PIPECONF_FRAME_START_DELAY(0);
9647
9648         I915_WRITE(PIPECONF(pipe), val);
9649         POSTING_READ(PIPECONF(pipe));
9650 }
9651
9652 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9653 {
9654         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9655         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9656         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9657         u32 val = 0;
9658
9659         if (IS_HASWELL(dev_priv) && crtc_state->dither)
9660                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9661
9662         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9663                 val |= PIPECONF_INTERLACED_ILK;
9664         else
9665                 val |= PIPECONF_PROGRESSIVE;
9666
9667         if (IS_HASWELL(dev_priv) &&
9668             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9669                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9670
9671         I915_WRITE(PIPECONF(cpu_transcoder), val);
9672         POSTING_READ(PIPECONF(cpu_transcoder));
9673 }
9674
9675 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9676 {
9677         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9678         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9679         u32 val = 0;
9680
9681         switch (crtc_state->pipe_bpp) {
9682         case 18:
9683                 val |= PIPEMISC_DITHER_6_BPC;
9684                 break;
9685         case 24:
9686                 val |= PIPEMISC_DITHER_8_BPC;
9687                 break;
9688         case 30:
9689                 val |= PIPEMISC_DITHER_10_BPC;
9690                 break;
9691         case 36:
9692                 val |= PIPEMISC_DITHER_12_BPC;
9693                 break;
9694         default:
9695                 MISSING_CASE(crtc_state->pipe_bpp);
9696                 break;
9697         }
9698
9699         if (crtc_state->dither)
9700                 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9701
9702         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9703             crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9704                 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9705
9706         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9707                 val |= PIPEMISC_YUV420_ENABLE |
9708                         PIPEMISC_YUV420_MODE_FULL_BLEND;
9709
9710         if (INTEL_GEN(dev_priv) >= 11 &&
9711             (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9712                                            BIT(PLANE_CURSOR))) == 0)
9713                 val |= PIPEMISC_HDR_MODE_PRECISION;
9714
9715         I915_WRITE(PIPEMISC(crtc->pipe), val);
9716 }
9717
9718 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9719 {
9720         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9721         u32 tmp;
9722
9723         tmp = I915_READ(PIPEMISC(crtc->pipe));
9724
9725         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9726         case PIPEMISC_DITHER_6_BPC:
9727                 return 18;
9728         case PIPEMISC_DITHER_8_BPC:
9729                 return 24;
9730         case PIPEMISC_DITHER_10_BPC:
9731                 return 30;
9732         case PIPEMISC_DITHER_12_BPC:
9733                 return 36;
9734         default:
9735                 MISSING_CASE(tmp);
9736                 return 0;
9737         }
9738 }
9739
/*
 * Compute the number of FDI lanes needed to carry target_clock (kHz)
 * at bpp bits per pixel over a link running at link_bw (kHz).
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int link_bytes = link_bw * 8;

	/* Round up: a partially used lane still counts. */
	return (bps + link_bytes - 1) / link_bytes;
}
9750
9751 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9752 {
9753         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9754 }
9755
/*
 * Fill crtc_state->dpll_hw_state (DPLL, FP0, FP1) for an ILK-style PCH
 * DPLL from the pre-computed divider values in crtc_state->dpll.
 * reduced_clock, if non-NULL, provides the dividers for the reduced
 * (FP1) clock; otherwise FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* 100MHz SSC LVDS and IBX dual-link LVDS use a larger
		 * tuning factor. */
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same m < factor * n criterion as
		 * ironlake_needs_fb_cb_tune(), applied to the reduced
		 * clock dividers. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for LVDS panels that requested it. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9857
9858 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9859                                        struct intel_crtc_state *crtc_state)
9860 {
9861         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9862         struct intel_atomic_state *state =
9863                 to_intel_atomic_state(crtc_state->uapi.state);
9864         const struct intel_limit *limit;
9865         int refclk = 120000;
9866
9867         memset(&crtc_state->dpll_hw_state, 0,
9868                sizeof(crtc_state->dpll_hw_state));
9869
9870         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9871         if (!crtc_state->has_pch_encoder)
9872                 return 0;
9873
9874         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9875                 if (intel_panel_use_ssc(dev_priv)) {
9876                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9877                                       dev_priv->vbt.lvds_ssc_freq);
9878                         refclk = dev_priv->vbt.lvds_ssc_freq;
9879                 }
9880
9881                 if (intel_is_dual_link_lvds(dev_priv)) {
9882                         if (refclk == 100000)
9883                                 limit = &intel_limits_ironlake_dual_lvds_100m;
9884                         else
9885                                 limit = &intel_limits_ironlake_dual_lvds;
9886                 } else {
9887                         if (refclk == 100000)
9888                                 limit = &intel_limits_ironlake_single_lvds_100m;
9889                         else
9890                                 limit = &intel_limits_ironlake_single_lvds;
9891                 }
9892         } else {
9893                 limit = &intel_limits_ironlake_dac;
9894         }
9895
9896         if (!crtc_state->clock_set &&
9897             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9898                                 refclk, NULL, &crtc_state->dpll)) {
9899                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9900                 return -EINVAL;
9901         }
9902
9903         ironlake_compute_dpll(crtc, crtc_state, NULL);
9904
9905         if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
9906                 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9907                               pipe_name(crtc->pipe));
9908                 return -EINVAL;
9909         }
9910
9911         return 0;
9912 }
9913
/*
 * Read back the PCH transcoder link M/N and data M/N values for the
 * crtc's pipe. The TU size is packed into the high bits of the data M
 * register (stored minus one in hardware, hence the +1).
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	/* gmch_m and tu come from the same register: the low bits hold
	 * M, the TU_SIZE field holds the TU size. */
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9929
/*
 * Read back the CPU transcoder link M/N and data M/N values. On gen5+
 * the registers are indexed by transcoder; older platforms use the
 * per-pipe G4X registers. If m2_n2 is non-NULL and the transcoder has a
 * second M/N set, that is read back as well. The TU size is packed into
 * the high bits of the data M register (stored minus one, hence the +1).
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		/* Second M/N set (M2/N2), if this transcoder has one. */
		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9966
9967 void intel_dp_get_m_n(struct intel_crtc *crtc,
9968                       struct intel_crtc_state *pipe_config)
9969 {
9970         if (pipe_config->has_pch_encoder)
9971                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9972         else
9973                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9974                                              &pipe_config->dp_m_n,
9975                                              &pipe_config->dp_m2_n2);
9976 }
9977
/* Read back the FDI M/N values from the CPU transcoder (no M2/N2). */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9984
9985 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9986                                     struct intel_crtc_state *pipe_config)
9987 {
9988         struct drm_device *dev = crtc->base.dev;
9989         struct drm_i915_private *dev_priv = to_i915(dev);
9990         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9991         u32 ps_ctrl = 0;
9992         int id = -1;
9993         int i;
9994
9995         /* find scaler attached to this pipe */
9996         for (i = 0; i < crtc->num_scalers; i++) {
9997                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9998                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9999                         id = i;
10000                         pipe_config->pch_pfit.enabled = true;
10001                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
10002                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
10003                         scaler_state->scalers[i].in_use = true;
10004                         break;
10005                 }
10006         }
10007
10008         scaler_state->scaler_id = id;
10009         if (id >= 0) {
10010                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
10011         } else {
10012                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
10013         }
10014 }
10015
/*
 * Read out the primary plane's hw state on SKL+ and reconstruct an
 * intel_framebuffer describing the firmware/BIOS framebuffer, so it can
 * be inherited for flicker-free boot. On any failure the plane_config is
 * simply left without an fb.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is off. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* The format field layout changed on gen11. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode moved from PLANE_CTL to PLANE_COLOR_CTL on GLK/gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hw tiling bits into a DRM framebuffer modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	/* Horizontal flip is a separate bit, available on gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Surface base address is 4k aligned; mask off the low bits. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores (height-1) << 16 | (width-1). */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* Hw stride is in units that depend on format/tiling. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10142
10143 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
10144                                      struct intel_crtc_state *pipe_config)
10145 {
10146         struct drm_device *dev = crtc->base.dev;
10147         struct drm_i915_private *dev_priv = to_i915(dev);
10148         u32 tmp;
10149
10150         tmp = I915_READ(PF_CTL(crtc->pipe));
10151
10152         if (tmp & PF_ENABLE) {
10153                 pipe_config->pch_pfit.enabled = true;
10154                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
10155                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
10156
10157                 /* We currently do not free assignements of panel fitters on
10158                  * ivb/hsw (since we don't use the higher upscaling modes which
10159                  * differentiates them) so just WARN about this case for now. */
10160                 if (IS_GEN(dev_priv, 7)) {
10161                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
10162                                 PF_PIPE_SEL_IVB(crtc->pipe));
10163                 }
10164         }
10165 }
10166
10167 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
10168                                      struct intel_crtc_state *pipe_config)
10169 {
10170         struct drm_device *dev = crtc->base.dev;
10171         struct drm_i915_private *dev_priv = to_i915(dev);
10172         enum intel_display_power_domain power_domain;
10173         intel_wakeref_t wakeref;
10174         u32 tmp;
10175         bool ret;
10176
10177         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10178         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10179         if (!wakeref)
10180                 return false;
10181
10182         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10183         pipe_config->shared_dpll = NULL;
10184         pipe_config->master_transcoder = INVALID_TRANSCODER;
10185
10186         ret = false;
10187         tmp = I915_READ(PIPECONF(crtc->pipe));
10188         if (!(tmp & PIPECONF_ENABLE))
10189                 goto out;
10190
10191         switch (tmp & PIPECONF_BPC_MASK) {
10192         case PIPECONF_6BPC:
10193                 pipe_config->pipe_bpp = 18;
10194                 break;
10195         case PIPECONF_8BPC:
10196                 pipe_config->pipe_bpp = 24;
10197                 break;
10198         case PIPECONF_10BPC:
10199                 pipe_config->pipe_bpp = 30;
10200                 break;
10201         case PIPECONF_12BPC:
10202                 pipe_config->pipe_bpp = 36;
10203                 break;
10204         default:
10205                 break;
10206         }
10207
10208         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10209                 pipe_config->limited_color_range = true;
10210
10211         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10212         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10213         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10214                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10215                 break;
10216         default:
10217                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10218                 break;
10219         }
10220
10221         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10222                 PIPECONF_GAMMA_MODE_SHIFT;
10223
10224         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10225
10226         i9xx_get_pipe_color_config(pipe_config);
10227         intel_color_get_config(pipe_config);
10228
10229         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10230                 struct intel_shared_dpll *pll;
10231                 enum intel_dpll_id pll_id;
10232
10233                 pipe_config->has_pch_encoder = true;
10234
10235                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10236                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10237                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10238
10239                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10240
10241                 if (HAS_PCH_IBX(dev_priv)) {
10242                         /*
10243                          * The pipe->pch transcoder and pch transcoder->pll
10244                          * mapping is fixed.
10245                          */
10246                         pll_id = (enum intel_dpll_id) crtc->pipe;
10247                 } else {
10248                         tmp = I915_READ(PCH_DPLL_SEL);
10249                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10250                                 pll_id = DPLL_ID_PCH_PLL_B;
10251                         else
10252                                 pll_id= DPLL_ID_PCH_PLL_A;
10253                 }
10254
10255                 pipe_config->shared_dpll =
10256                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10257                 pll = pipe_config->shared_dpll;
10258
10259                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10260                                                 &pipe_config->dpll_hw_state));
10261
10262                 tmp = pipe_config->dpll_hw_state.dpll;
10263                 pipe_config->pixel_multiplier =
10264                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10265                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10266
10267                 ironlake_pch_clock_get(crtc, pipe_config);
10268         } else {
10269                 pipe_config->pixel_multiplier = 1;
10270         }
10271
10272         intel_get_pipe_timings(crtc, pipe_config);
10273         intel_get_pipe_src_size(crtc, pipe_config);
10274
10275         ironlake_get_pfit_config(crtc, pipe_config);
10276
10277         ret = true;
10278
10279 out:
10280         intel_display_power_put(dev_priv, power_domain, wakeref);
10281
10282         return ret;
10283 }
10284 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10285                                       struct intel_crtc_state *crtc_state)
10286 {
10287         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10288         struct intel_atomic_state *state =
10289                 to_intel_atomic_state(crtc_state->uapi.state);
10290
10291         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10292             INTEL_GEN(dev_priv) >= 11) {
10293                 struct intel_encoder *encoder =
10294                         intel_get_crtc_new_encoder(state, crtc_state);
10295
10296                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10297                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10298                                       pipe_name(crtc->pipe));
10299                         return -EINVAL;
10300                 }
10301         }
10302
10303         return 0;
10304 }
10305
10306 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
10307                                    enum port port,
10308                                    struct intel_crtc_state *pipe_config)
10309 {
10310         enum intel_dpll_id id;
10311         u32 temp;
10312
10313         temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10314         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10315
10316         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10317                 return;
10318
10319         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10320 }
10321
/*
 * Look up which PLL is clocking the given DDI port on ICL+ and record it
 * in the crtc state's per-port DPLL table, marking it active.
 *
 * Combo PHY ports read the selection from ICL_DPCLKA_CFGCR0; Type-C
 * ports are driven either by their MG PHY PLL or by the TBT PLL,
 * depending on DDI_CLK_SEL.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		temp = I915_READ(ICL_DPCLKA_CFGCR0) &
			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* Anything below the TBT selects is unexpected here. */
			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		WARN(1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10358
10359 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10360                                 enum port port,
10361                                 struct intel_crtc_state *pipe_config)
10362 {
10363         enum intel_dpll_id id;
10364
10365         switch (port) {
10366         case PORT_A:
10367                 id = DPLL_ID_SKL_DPLL0;
10368                 break;
10369         case PORT_B:
10370                 id = DPLL_ID_SKL_DPLL1;
10371                 break;
10372         case PORT_C:
10373                 id = DPLL_ID_SKL_DPLL2;
10374                 break;
10375         default:
10376                 DRM_ERROR("Incorrect port type\n");
10377                 return;
10378         }
10379
10380         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10381 }
10382
10383 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
10384                                 enum port port,
10385                                 struct intel_crtc_state *pipe_config)
10386 {
10387         enum intel_dpll_id id;
10388         u32 temp;
10389
10390         temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10391         id = temp >> (port * 3 + 1);
10392
10393         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
10394                 return;
10395
10396         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10397 }
10398
/*
 * Translate a HSW/BDW PORT_CLK_SEL value into the shared DPLL driving
 * the port and store it in @pipe_config. If the port has no clock
 * selected (PORT_CLK_SEL_NONE, including the MISSING_CASE fallthrough)
 * the shared_dpll is left untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10434
/*
 * Determine which CPU transcoder drives this crtc (including the eDP and
 * gen11+ DSI panel transcoders, whose pipe assignment is programmable)
 * and whether the pipe is enabled.
 *
 * On success the transcoder's power domain reference is kept: the
 * wakeref is stored in @wakerefs[domain] and the domain bit is set in
 * @power_domain_mask, for the caller to release later.
 *
 * Returns true iff the transcoder feeding this pipe is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	/* Build the set of panel transcoders this platform has. */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER_EDP(dev_priv))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Which pipe is this panel transcoder feeding? */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	/* Hand the wakeref to the caller for release after readout. */
	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10530
/*
 * Check whether one of the BXT DSI transcoders is driving this crtc and,
 * if so, record it as the cpu_transcoder.
 *
 * Any DSI transcoder power domain references taken while probing are
 * kept (stored in @wakerefs / @power_domain_mask) for the caller to
 * release after readout.
 *
 * Returns true iff a DSI transcoder is attached to this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		/* Hand the wakeref to the caller for release after readout. */
		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip DSI ports routed to a different pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10585
/*
 * Read out the DDI-port related state for this crtc: determine which
 * port the transcoder drives, look up (per platform) the PLL clocking
 * it, read the PLL's hw state, and detect the HSW FDI/PCH case.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	/* DSI transcoders have a fixed port; others encode it in FUNC_CTL. */
	if (transcoder_is_dsi(cpu_transcoder)) {
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10639
10640 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
10641                                                  enum transcoder cpu_transcoder)
10642 {
10643         u32 trans_port_sync, master_select;
10644
10645         trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
10646
10647         if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
10648                 return INVALID_TRANSCODER;
10649
10650         master_select = trans_port_sync &
10651                         PORT_SYNC_MODE_MASTER_SELECT_MASK;
10652         if (master_select == 0)
10653                 return TRANSCODER_EDP;
10654         else
10655                 return master_select - 1;
10656 }
10657
/*
 * Read out the transcoder port sync configuration: the master of this
 * transcoder (if it is a slave) and the mask of transcoders that have
 * this one configured as their master (if it is a master). A transcoder
 * may not be both at once, hence the WARN at the end.
 */
static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 transcoders;
	enum transcoder cpu_transcoder;

	crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
								  crtc_state->cpu_transcoder);

	/* Scan all regular transcoders for slaves pointing back at us. */
	transcoders = BIT(TRANSCODER_A) |
		BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) |
		BIT(TRANSCODER_D);
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t trans_wakeref;

		/* Skip powered-down transcoders; keep the power on while reading. */
		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
								   power_domain);

		if (!trans_wakeref)
			continue;

		if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
		    crtc_state->cpu_transcoder)
			crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

		intel_display_power_put(dev_priv, power_domain, trans_wakeref);
	}

	/* A transcoder cannot have both a master and slaves. */
	WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
		crtc_state->sync_mode_slaves_mask);
}
10692
/*
 * Read out the current hardware state of the pipe on HSW+ platforms and
 * fill in @pipe_config accordingly. Returns true if the pipe is active.
 *
 * Every power well touched while reading registers is acquired with
 * intel_display_power_get_if_enabled(); the wakerefs are collected in
 * wakerefs[] (indexed by power domain) and released at "out:" using the
 * accumulated power_domain_mask.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
        enum intel_display_power_domain power_domain;
        u64 power_domain_mask;
        bool active;

        intel_crtc_init_scalers(crtc, pipe_config);

        pipe_config->master_transcoder = INVALID_TRANSCODER;

        /* Bail out early if the pipe's power well is down. */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        wakerefs[power_domain] = wf;
        power_domain_mask = BIT_ULL(power_domain);

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config,
                                          &power_domain_mask, wakerefs);

        /*
         * On BXT/GLK a DSI transcoder may drive the pipe instead; the two
         * readouts are mutually exclusive, hence the WARN_ON.
         */
        if (IS_GEN9_LP(dev_priv) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config,
                                         &power_domain_mask, wakerefs)) {
                WARN_ON(active);
                active = true;
        }

        if (!active)
                goto out;

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            INTEL_GEN(dev_priv) >= 11) {
                haswell_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }

        intel_get_pipe_src_size(crtc, pipe_config);

        /* Output colorspace lives in PIPECONF on HSW, in PIPEMISC on BDW+. */
        if (IS_HASWELL(dev_priv)) {
                u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

                if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                else
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        } else {
                pipe_config->output_format =
                        bdw_get_pipemisc_output_format(crtc);

                /*
                 * Currently there is no interface defined to
                 * check user preference between RGB/YCBCR444
                 * or YCBCR420. So the only possible case for
                 * YCBCR444 usage is driving YCBCR420 output
                 * with LSPCON, when pipe is configured for
                 * YCBCR444 output and LSPCON takes care of
                 * downsampling it.
                 */
                pipe_config->lspcon_downsampling =
                        pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
        }

        pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

        pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

        if (INTEL_GEN(dev_priv) >= 9) {
                u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
                        pipe_config->gamma_enable = true;

                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
                        pipe_config->csc_enable = true;
        } else {
                i9xx_get_pipe_color_config(pipe_config);
        }

        intel_color_get_config(pipe_config);

        /* Panel fitter state is behind its own power domain. */
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        WARN_ON(power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wf) {
                wakerefs[power_domain] = wf;
                power_domain_mask |= BIT_ULL(power_domain);

                if (INTEL_GEN(dev_priv) >= 9)
                        skylake_get_pfit_config(crtc, pipe_config);
                else
                        ironlake_get_pfit_config(crtc, pipe_config);
        }

        if (hsw_crtc_supports_ips(crtc)) {
                if (IS_HASWELL(dev_priv))
                        pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
                else {
                        /*
                         * We cannot readout IPS state on broadwell, set to
                         * true so we can set it to a defined state on first
                         * commit.
                         */
                        pipe_config->ips_enabled = true;
                }
        }

        /* PIPE_MULT stores multiplier - 1; DSI/eDP transcoders lack it. */
        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

        if (INTEL_GEN(dev_priv) >= 11 &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder))
                icelake_get_trans_port_sync_config(pipe_config);

out:
        /* Drop every wakeref acquired above. */
        for_each_power_domain(power_domain, power_domain_mask)
                intel_display_power_put(dev_priv,
                                        power_domain, wakerefs[power_domain]);

        return active;
}
10825
10826 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10827 {
10828         struct drm_i915_private *dev_priv =
10829                 to_i915(plane_state->uapi.plane->dev);
10830         const struct drm_framebuffer *fb = plane_state->hw.fb;
10831         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10832         u32 base;
10833
10834         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10835                 base = obj->phys_handle->busaddr;
10836         else
10837                 base = intel_plane_ggtt_offset(plane_state);
10838
10839         return base + plane_state->color_plane[0].offset;
10840 }
10841
10842 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10843 {
10844         int x = plane_state->uapi.dst.x1;
10845         int y = plane_state->uapi.dst.y1;
10846         u32 pos = 0;
10847
10848         if (x < 0) {
10849                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10850                 x = -x;
10851         }
10852         pos |= x << CURSOR_X_SHIFT;
10853
10854         if (y < 0) {
10855                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10856                 y = -y;
10857         }
10858         pos |= y << CURSOR_Y_SHIFT;
10859
10860         return pos;
10861 }
10862
10863 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10864 {
10865         const struct drm_mode_config *config =
10866                 &plane_state->uapi.plane->dev->mode_config;
10867         int width = drm_rect_width(&plane_state->uapi.dst);
10868         int height = drm_rect_height(&plane_state->uapi.dst);
10869
10870         return width > 0 && width <= config->cursor_width &&
10871                 height > 0 && height <= config->cursor_height;
10872 }
10873
/*
 * Pin/compute the cursor surface and fill in color_plane[0] (offset/x/y).
 *
 * Returns 0 on success, a negative errno on failure. Cursor planes do not
 * support arbitrary panning: after offset alignment the residual x/y must
 * both be zero.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        unsigned int rotation = plane_state->hw.rotation;
        int src_x, src_y;
        u32 offset;
        int ret;

        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
                return ret;

        /* Nothing more to compute for an invisible cursor. */
        if (!plane_state->uapi.visible)
                return 0;

        /* src coordinates are 16.16 fixed point. */
        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;

        intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
                                                    plane_state, 0);

        if (src_x != 0 || src_y != 0) {
                DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
                return -EINVAL;
        }

        /*
         * Put the final coordinates back so that the src
         * coordinate checks will see the right values.
         */
        drm_rect_translate_to(&plane_state->uapi.src,
                              src_x << 16, src_y << 16);

        /* ILK+ do this automagically in hardware */
        if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
                /* Point at the last pixel so 180° rotation scans out correctly. */
                const struct drm_framebuffer *fb = plane_state->hw.fb;
                int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
                int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

                offset += (src_h * src_w - 1) * fb->format->cpp[0];
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = src_x;
        plane_state->color_plane[0].y = src_y;

        return 0;
}
10924
10925 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10926                               struct intel_plane_state *plane_state)
10927 {
10928         const struct drm_framebuffer *fb = plane_state->hw.fb;
10929         int ret;
10930
10931         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10932                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10933                 return -EINVAL;
10934         }
10935
10936         ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
10937                                                   &crtc_state->uapi,
10938                                                   DRM_PLANE_HELPER_NO_SCALING,
10939                                                   DRM_PLANE_HELPER_NO_SCALING,
10940                                                   true, true);
10941         if (ret)
10942                 return ret;
10943
10944         /* Use the unclipped src/dst rectangles, which we program to hw */
10945         plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
10946         plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
10947
10948         ret = intel_cursor_check_surface(plane_state);
10949         if (ret)
10950                 return ret;
10951
10952         if (!plane_state->uapi.visible)
10953                 return 0;
10954
10955         ret = intel_plane_check_src_coordinates(plane_state);
10956         if (ret)
10957                 return ret;
10958
10959         return 0;
10960 }
10961
/* 845/865: maximum cursor stride in bytes, independent of format/rotation. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return 2048;
}
10969
10970 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10971 {
10972         u32 cntl = 0;
10973
10974         if (crtc_state->gamma_enable)
10975                 cntl |= CURSOR_GAMMA_ENABLE;
10976
10977         return cntl;
10978 }
10979
10980 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10981                            const struct intel_plane_state *plane_state)
10982 {
10983         return CURSOR_ENABLE |
10984                 CURSOR_FORMAT_ARGB |
10985                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10986 }
10987
10988 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10989 {
10990         int width = drm_rect_width(&plane_state->uapi.dst);
10991
10992         /*
10993          * 845g/865g are only limited by the width of their cursors,
10994          * the height is arbitrary up to the precision of the register.
10995          */
10996         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10997 }
10998
10999 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
11000                              struct intel_plane_state *plane_state)
11001 {
11002         const struct drm_framebuffer *fb = plane_state->hw.fb;
11003         int ret;
11004
11005         ret = intel_check_cursor(crtc_state, plane_state);
11006         if (ret)
11007                 return ret;
11008
11009         /* if we want to turn off the cursor ignore width and height */
11010         if (!fb)
11011                 return 0;
11012
11013         /* Check for which cursor types we support */
11014         if (!i845_cursor_size_ok(plane_state)) {
11015                 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
11016                           drm_rect_width(&plane_state->uapi.dst),
11017                           drm_rect_height(&plane_state->uapi.dst));
11018                 return -EINVAL;
11019         }
11020
11021         WARN_ON(plane_state->uapi.visible &&
11022                 plane_state->color_plane[0].stride != fb->pitches[0]);
11023
11024         switch (fb->pitches[0]) {
11025         case 256:
11026         case 512:
11027         case 1024:
11028         case 2048:
11029                 break;
11030         default:
11031                 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
11032                               fb->pitches[0]);
11033                 return -EINVAL;
11034         }
11035
11036         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
11037
11038         return 0;
11039 }
11040
/*
 * Program the 845/865 cursor registers. Passing a NULL or invisible
 * @plane_state disables the cursor (all-zero cntl/base/size/pos).
 * Register writes are done under the uncore lock with the _FW accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        u32 cntl = 0, base = 0, pos = 0, size = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->uapi.visible) {
                unsigned int width = drm_rect_width(&plane_state->uapi.dst);
                unsigned int height = drm_rect_height(&plane_state->uapi.dst);

                cntl = plane_state->ctl |
                        i845_cursor_ctl_crtc(crtc_state);

                /* CURSIZE packs height in bits 12+ and width in the low bits. */
                size = (height << 12) | width;

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* On these chipsets we can only modify the base/size/stride
         * whilst the cursor is disabled.
         */
        if (plane->cursor.base != base ||
            plane->cursor.size != size ||
            plane->cursor.cntl != cntl) {
                /* Disable first, then reprogram, then re-enable. */
                I915_WRITE_FW(CURCNTR(PIPE_A), 0);
                I915_WRITE_FW(CURBASE(PIPE_A), base);
                I915_WRITE_FW(CURSIZE, size);
                I915_WRITE_FW(CURPOS(PIPE_A), pos);
                I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

                plane->cursor.base = base;
                plane->cursor.size = size;
                plane->cursor.cntl = cntl;
        } else {
                /* Fast path: only the position changed. */
                I915_WRITE_FW(CURPOS(PIPE_A), pos);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11085
/* Disable the 845/865 cursor by programming it with a NULL plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i845_update_cursor(plane, crtc_state, NULL);
}
11091
11092 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11093                                      enum pipe *pipe)
11094 {
11095         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11096         enum intel_display_power_domain power_domain;
11097         intel_wakeref_t wakeref;
11098         bool ret;
11099
11100         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11101         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11102         if (!wakeref)
11103                 return false;
11104
11105         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11106
11107         *pipe = PIPE_A;
11108
11109         intel_display_power_put(dev_priv, power_domain, wakeref);
11110
11111         return ret;
11112 }
11113
/* i9xx+: max cursor stride is the max cursor width times 4 bytes (ARGB). */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return plane->base.dev->mode_config.cursor_width * 4;
}
11121
11122 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11123 {
11124         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11125         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11126         u32 cntl = 0;
11127
11128         if (INTEL_GEN(dev_priv) >= 11)
11129                 return cntl;
11130
11131         if (crtc_state->gamma_enable)
11132                 cntl = MCURSOR_GAMMA_ENABLE;
11133
11134         if (crtc_state->csc_enable)
11135                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11136
11137         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11138                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11139
11140         return cntl;
11141 }
11142
11143 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11144                            const struct intel_plane_state *plane_state)
11145 {
11146         struct drm_i915_private *dev_priv =
11147                 to_i915(plane_state->uapi.plane->dev);
11148         u32 cntl = 0;
11149
11150         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11151                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11152
11153         switch (drm_rect_width(&plane_state->uapi.dst)) {
11154         case 64:
11155                 cntl |= MCURSOR_MODE_64_ARGB_AX;
11156                 break;
11157         case 128:
11158                 cntl |= MCURSOR_MODE_128_ARGB_AX;
11159                 break;
11160         case 256:
11161                 cntl |= MCURSOR_MODE_256_ARGB_AX;
11162                 break;
11163         default:
11164                 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11165                 return 0;
11166         }
11167
11168         if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11169                 cntl |= MCURSOR_ROTATE_180;
11170
11171         return cntl;
11172 }
11173
11174 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11175 {
11176         struct drm_i915_private *dev_priv =
11177                 to_i915(plane_state->uapi.plane->dev);
11178         int width = drm_rect_width(&plane_state->uapi.dst);
11179         int height = drm_rect_height(&plane_state->uapi.dst);
11180
11181         if (!intel_cursor_size_ok(plane_state))
11182                 return false;
11183
11184         /* Cursor width is limited to a few power-of-two sizes */
11185         switch (width) {
11186         case 256:
11187         case 128:
11188         case 64:
11189                 break;
11190         default:
11191                 return false;
11192         }
11193
11194         /*
11195          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11196          * height from 8 lines up to the cursor width, when the
11197          * cursor is not rotated. Everything else requires square
11198          * cursors.
11199          */
11200         if (HAS_CUR_FBC(dev_priv) &&
11201             plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11202                 if (height < 8 || height > width)
11203                         return false;
11204         } else {
11205                 if (height != width)
11206                         return false;
11207         }
11208
11209         return true;
11210 }
11211
/*
 * Validate the cursor plane state for i9xx+ platforms and compute
 * plane_state->ctl. Returns 0 on success, negative errno on unsupported
 * size/stride, or on the CHV pipe C left-edge-straddle quirk below.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
                             struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        enum pipe pipe = plane->pipe;
        int ret;

        ret = intel_check_cursor(crtc_state, plane_state);
        if (ret)
                return ret;

        /* if we want to turn off the cursor ignore width and height */
        if (!fb)
                return 0;

        /* Check for which cursor types we support */
        if (!i9xx_cursor_size_ok(plane_state)) {
                DRM_DEBUG("Cursor dimension %dx%d not supported\n",
                          drm_rect_width(&plane_state->uapi.dst),
                          drm_rect_height(&plane_state->uapi.dst));
                return -EINVAL;
        }

        WARN_ON(plane_state->uapi.visible &&
                plane_state->color_plane[0].stride != fb->pitches[0]);

        /* The stride must be exactly width * cpp (packed ARGB). */
        if (fb->pitches[0] !=
            drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
                DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
                              fb->pitches[0],
                              drm_rect_width(&plane_state->uapi.dst));
                return -EINVAL;
        }

        /*
         * There's something wrong with the cursor on CHV pipe C.
         * If it straddles the left edge of the screen then
         * moving it away from the edge or disabling it often
         * results in a pipe underrun, and often that can lead to
         * dead pipe (constant underrun reported, and it scans
         * out just a solid color). To recover from that, the
         * display power well must be turned off and on again.
         * Refuse to put the cursor into that compromised position.
         */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
            plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
                DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
                return -EINVAL;
        }

        plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

        return 0;
}
11268
/*
 * Program the i9xx+ cursor registers. Passing a NULL or invisible
 * @plane_state disables the cursor. Writes happen under the uncore
 * lock using the _FW accessors; the write ordering is significant,
 * see the comment below.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
        u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->uapi.visible) {
                unsigned width = drm_rect_width(&plane_state->uapi.dst);
                unsigned height = drm_rect_height(&plane_state->uapi.dst);

                cntl = plane_state->ctl |
                        i9xx_cursor_ctl_crtc(crtc_state);

                /* Non-square cursors need CUR_FBC_CTL (height - 1). */
                if (width != height)
                        fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * On some platforms writing CURCNTR first will also
         * cause CURPOS to be armed by the CURBASE write.
         * Without the CURCNTR write the CURPOS write would
         * arm itself. Thus we always update CURCNTR before
         * CURPOS.
         *
         * On other platforms CURPOS always requires the
         * CURBASE write to arm the update. Additionally
         * a write to any of the cursor register will cancel
         * an already armed cursor update. Thus leaving out
         * the CURBASE write after CURPOS could lead to a
         * cursor that doesn't appear to move, or even change
         * shape. Thus we always write CURBASE.
         *
         * The other registers are armed by the CURBASE write
         * except when the plane is getting enabled at which time
         * the CURCNTR write arms the update.
         */

        if (INTEL_GEN(dev_priv) >= 9)
                skl_write_cursor_wm(plane, crtc_state);

        if (plane->cursor.base != base ||
            plane->cursor.size != fbc_ctl ||
            plane->cursor.cntl != cntl) {
                if (HAS_CUR_FBC(dev_priv))
                        I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
                I915_WRITE_FW(CURCNTR(pipe), cntl);
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);

                plane->cursor.base = base;
                plane->cursor.size = fbc_ctl;
                plane->cursor.cntl = cntl;
        } else {
                /* Fast path: only the position changed; CURBASE arms it. */
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11336
/* Disable the i9xx+ cursor by programming it with a NULL plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i9xx_update_cursor(plane, crtc_state, NULL);
}
11342
/*
 * Read out whether the i9xx+ cursor is enabled and, on platforms with a
 * pipe-select field, which pipe it is attached to.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
                                     enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        bool ret;
        u32 val;

        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-3 which don't have any
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        val = I915_READ(CURCNTR(plane->pipe));

        /* Any non-zero cursor mode means the cursor is enabled. */
        ret = val & MCURSOR_MODE;

        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                *pipe = plane->pipe;
        else
                /* Pre-gen5 (except g4x): pipe comes from the select bits. */
                *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
                        MCURSOR_PIPE_SELECT_SHIFT;

        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
11376
/* VESA 640x480x72Hz mode to set on the pipe for load detection */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11382
11383 struct drm_framebuffer *
11384 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11385                          struct drm_mode_fb_cmd2 *mode_cmd)
11386 {
11387         struct intel_framebuffer *intel_fb;
11388         int ret;
11389
11390         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11391         if (!intel_fb)
11392                 return ERR_PTR(-ENOMEM);
11393
11394         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11395         if (ret)
11396                 goto err;
11397
11398         return &intel_fb->base;
11399
11400 err:
11401         kfree(intel_fb);
11402         return ERR_PTR(ret);
11403 }
11404
11405 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11406                                         struct drm_crtc *crtc)
11407 {
11408         struct drm_plane *plane;
11409         struct drm_plane_state *plane_state;
11410         int ret, i;
11411
11412         ret = drm_atomic_add_affected_planes(state, crtc);
11413         if (ret)
11414                 return ret;
11415
11416         for_each_new_plane_in_state(state, plane, plane_state, i) {
11417                 if (plane_state->crtc != crtc)
11418                         continue;
11419
11420                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11421                 if (ret)
11422                         return ret;
11423
11424                 drm_atomic_set_fb_for_plane(plane_state, NULL);
11425         }
11426
11427         return 0;
11428 }
11429
/*
 * Find (or reuse) a CRTC and light it up with a fixed 640x480 mode so the
 * connector can be probed by load detection. On success stores the state
 * needed to undo everything in @old->restore_state and returns true (after
 * waiting one vblank so the probe sees a full scanout cycle). Returns
 * false on failure, or -EDEADLK if @ctx needs to be backed off and
 * restarted by the caller.
 *
 * NOTE(review): the function is declared int but returns true/false in
 * addition to -EDEADLK; callers are expected to handle both.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_crtc *intel_crtc;
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(connector);
        struct drm_crtc *possible_crtc;
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = NULL;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret, i = -1;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, connector->name,
                      encoder->base.id, encoder->name);

        old->restore_state = NULL;

        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = connector->state->crtc;

                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_crtc(dev, possible_crtc) {
                i++;
                /* Skip crtcs this encoder cannot drive. */
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
                if (ret)
                        goto fail;

                if (possible_crtc->state->enable) {
                        drm_modeset_unlock(&possible_crtc->mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                DRM_DEBUG_KMS("no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        intel_crtc = to_intel_crtc(crtc);

        /* One state for the modeset, one to restore the previous config. */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->uapi.active = true;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
                                           &load_detect_mode);
        if (ret)
                goto fail;

        /* Scan out a blank screen: no planes on the load-detect pipe. */
        ret = intel_modeset_disable_planes(state, crtc);
        if (ret)
                goto fail;

        /* Snapshot the current connector/crtc/plane state for restore. */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, crtc);
        if (ret) {
                DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                goto fail;
        }

        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* Propagate deadlock so the caller can back off and retry. */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
11584
11585 void intel_release_load_detect_pipe(struct drm_connector *connector,
11586                                     struct intel_load_detect_pipe *old,
11587                                     struct drm_modeset_acquire_ctx *ctx)
11588 {
11589         struct intel_encoder *intel_encoder =
11590                 intel_attached_encoder(connector);
11591         struct drm_encoder *encoder = &intel_encoder->base;
11592         struct drm_atomic_state *state = old->restore_state;
11593         int ret;
11594
11595         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11596                       connector->base.id, connector->name,
11597                       encoder->base.id, encoder->name);
11598
11599         if (!state)
11600                 return;
11601
11602         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11603         if (ret)
11604                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11605         drm_atomic_state_put(state);
11606 }
11607
11608 static int i9xx_pll_refclk(struct drm_device *dev,
11609                            const struct intel_crtc_state *pipe_config)
11610 {
11611         struct drm_i915_private *dev_priv = to_i915(dev);
11612         u32 dpll = pipe_config->dpll_hw_state.dpll;
11613
11614         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11615                 return dev_priv->vbt.lvds_ssc_freq;
11616         else if (HAS_PCH_SPLIT(dev_priv))
11617                 return 120000;
11618         else if (!IS_GEN(dev_priv, 2))
11619                 return 96000;
11620         else
11621                 return 48000;
11622 }
11623
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* DISPLAY_RATE_SELECT_FPA1 picks which FP divisor register is live. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Extract m1/n/m2 divisors; Pineview uses different field layouts. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* Pineview encodes N as a one-hot bit; ffs() recovers the value. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored one-hot in the DPLL register on gen3+. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: I830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* Dual-channel LVDS (clock B powered) implies p2 == 7. */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
11713
/*
 * Derive a dotclock (in kHz) from a link frequency and an m/n ratio.
 * Returns 0 when the ratio is unprogrammed (link_n == 0).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	/* 64-bit intermediate avoids overflow of link_m * link_freq. */
	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
11732
/*
 * Read out the PCH-side clock state: port_clock from the DPLL, plus a
 * dotclock estimate derived from the FDI m/n configuration.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
11750
11751 /* Returns the currently programmed mode of the given encoder. */
11752 struct drm_display_mode *
11753 intel_encoder_current_mode(struct intel_encoder *encoder)
11754 {
11755         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11756         struct intel_crtc_state *crtc_state;
11757         struct drm_display_mode *mode;
11758         struct intel_crtc *crtc;
11759         enum pipe pipe;
11760
11761         if (!encoder->get_hw_state(encoder, &pipe))
11762                 return NULL;
11763
11764         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11765
11766         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11767         if (!mode)
11768                 return NULL;
11769
11770         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11771         if (!crtc_state) {
11772                 kfree(mode);
11773                 return NULL;
11774         }
11775
11776         crtc_state->uapi.crtc = &crtc->base;
11777
11778         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11779                 kfree(crtc_state);
11780                 kfree(mode);
11781                 return NULL;
11782         }
11783
11784         encoder->get_config(encoder, crtc_state);
11785
11786         intel_mode_from_pipe_config(mode, crtc_state);
11787
11788         kfree(crtc_state);
11789
11790         return mode;
11791 }
11792
/* drm_crtc_funcs.destroy hook: tear down the CRTC and free its wrapper. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
11800
11801 /**
11802  * intel_wm_need_update - Check whether watermarks need updating
11803  * @cur: current plane state
11804  * @new: new plane state
11805  *
11806  * Check current plane state versus the new one to determine whether
11807  * watermarks need to be recalculated.
11808  *
11809  * Returns true or false.
11810  */
11811 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11812                                  struct intel_plane_state *new)
11813 {
11814         /* Update watermarks on tiling or size changes. */
11815         if (new->uapi.visible != cur->uapi.visible)
11816                 return true;
11817
11818         if (!cur->hw.fb || !new->hw.fb)
11819                 return false;
11820
11821         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11822             cur->hw.rotation != new->hw.rotation ||
11823             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11824             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11825             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11826             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
11827                 return true;
11828
11829         return false;
11830 }
11831
11832 static bool needs_scaling(const struct intel_plane_state *state)
11833 {
11834         int src_w = drm_rect_width(&state->uapi.src) >> 16;
11835         int src_h = drm_rect_height(&state->uapi.src) >> 16;
11836         int dst_w = drm_rect_width(&state->uapi.dst);
11837         int dst_h = drm_rect_height(&state->uapi.dst);
11838
11839         return (src_w != dst_w || src_h != dst_h);
11840 }
11841
/*
 * Compute the per-plane derived state (watermark update flags, cxsr and
 * LP watermark workarounds, frontbuffer bits) for a plane transitioning
 * from @old_plane_state to @plane_state on @crtc_state.
 *
 * Returns 0 on success or a negative error code from the scaler check.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* Gen9+ non-cursor planes may need a pipe scaler slot. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane cannot have been visible on a disabled crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->uapi.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
		crtc_state->min_cdclk[plane->id] = 0;
	}

	/* Plane stays invisible: nothing further to derive. */
	if (!was_visible && !visible)
		return 0;

	/* A full modeset counts as both turning the plane off and back on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 crtc->base.base.id, crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
11964
11965 static bool encoders_cloneable(const struct intel_encoder *a,
11966                                const struct intel_encoder *b)
11967 {
11968         /* masks could be asymmetric, so check both ways */
11969         return a == b || (a->cloneable & (1 << b->type) &&
11970                           b->cloneable & (1 << a->type));
11971 }
11972
11973 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11974                                          struct intel_crtc *crtc,
11975                                          struct intel_encoder *encoder)
11976 {
11977         struct intel_encoder *source_encoder;
11978         struct drm_connector *connector;
11979         struct drm_connector_state *connector_state;
11980         int i;
11981
11982         for_each_new_connector_in_state(state, connector, connector_state, i) {
11983                 if (connector_state->crtc != &crtc->base)
11984                         continue;
11985
11986                 source_encoder =
11987                         to_intel_encoder(connector_state->best_encoder);
11988                 if (!encoders_cloneable(encoder, source_encoder))
11989                         return false;
11990         }
11991
11992         return true;
11993 }
11994
11995 static int icl_add_linked_planes(struct intel_atomic_state *state)
11996 {
11997         struct intel_plane *plane, *linked;
11998         struct intel_plane_state *plane_state, *linked_plane_state;
11999         int i;
12000
12001         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12002                 linked = plane_state->planar_linked_plane;
12003
12004                 if (!linked)
12005                         continue;
12006
12007                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
12008                 if (IS_ERR(linked_plane_state))
12009                         return PTR_ERR(linked_plane_state);
12010
12011                 WARN_ON(linked_plane_state->planar_linked_plane != plane);
12012                 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
12013         }
12014
12015         return 0;
12016 }
12017
/*
 * Pair up each NV12 (planar YUV) plane on @crtc_state with a free Y
 * plane, copying the relevant hardware parameters to the slave plane.
 * Gen11+ only; returns -EINVAL if no free Y plane is available.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No NV12 planes in the new state: nothing to pair up. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a Y-capable plane that is currently unused. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->color_plane[0] = plane_state->color_plane[0];

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/* HDR planes hard-code which CUS partner slot is used. */
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12107
12108 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12109 {
12110         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12111         struct intel_atomic_state *state =
12112                 to_intel_atomic_state(new_crtc_state->uapi.state);
12113         const struct intel_crtc_state *old_crtc_state =
12114                 intel_atomic_get_old_crtc_state(state, crtc);
12115
12116         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12117 }
12118
/*
 * For tiled displays (gen11+), find the master/genlock CRTC for a slave
 * tile's CRTC and record the master transcoder / slave bitmask linkage
 * in both crtc states. Returns 0 or a negative error code.
 */
static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_connector *master_connector, *connector;
	struct drm_connector_state *connector_state;
	struct drm_connector_list_iter conn_iter;
	/*
	 * NOTE(review): master_crtc is only initialized once and never reset
	 * inside the loop below; if the state contained more than one slave
	 * connector, a stale value from a previous iteration could be reused
	 * — confirm whether a single slave per state is guaranteed.
	 */
	struct drm_crtc *master_crtc = NULL;
	struct drm_crtc_state *master_crtc_state;
	struct intel_crtc_state *master_pipe_config;
	int i, tile_group_id;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * In case of tiled displays there could be one or more slaves but there is
	 * only one master. Let's make the CRTC used by the connector corresponding
	 * to the last horizontal and last vertical tile a master/genlock CRTC.
	 * All the other CRTCs corresponding to other tiles of the same Tile group
	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;
		if (!connector->has_tile)
			continue;
		/* A mode smaller than the tile is not a tiled configuration. */
		if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
		    crtc_state->hw.mode.vdisplay != connector->tile_v_size)
			return 0;
		/* The last-tile connector IS the master; it needs no linkage. */
		if (connector->tile_h_loc == connector->num_h_tile - 1 &&
		    connector->tile_v_loc == connector->num_v_tile - 1)
			continue;
		crtc_state->sync_mode_slaves_mask = 0;
		tile_group_id = connector->tile_group->id;
		/* Scan all connectors for the master tile of this group. */
		drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
		drm_for_each_connector_iter(master_connector, &conn_iter) {
			struct drm_connector_state *master_conn_state = NULL;

			if (!master_connector->has_tile)
				continue;
			if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
			    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
				continue;
			if (master_connector->tile_group->id != tile_group_id)
				continue;

			master_conn_state = drm_atomic_get_connector_state(&state->base,
									   master_connector);
			if (IS_ERR(master_conn_state)) {
				drm_connector_list_iter_end(&conn_iter);
				return PTR_ERR(master_conn_state);
			}
			if (master_conn_state->crtc) {
				master_crtc = master_conn_state->crtc;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!master_crtc) {
			DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
				      connector_state->crtc->base.id);
			return -EINVAL;
		}

		master_crtc_state = drm_atomic_get_crtc_state(&state->base,
							      master_crtc);
		if (IS_ERR(master_crtc_state))
			return PTR_ERR(master_crtc_state);

		/* Record the linkage in both the slave and master states. */
		master_pipe_config = to_intel_crtc_state(master_crtc_state);
		crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
		master_pipe_config->sync_mode_slaves_mask |=
			BIT(crtc_state->cpu_transcoder);
		DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
			      transcoder_name(crtc_state->master_transcoder),
			      crtc_state->uapi.crtc->base.id,
			      master_pipe_config->sync_mode_slaves_mask);
	}

	return 0;
}
12203
/*
 * Per-crtc atomic check: compute clocks, color management, watermarks
 * and scaler state for @crtc in @state. Returns 0 or a negative error.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* Pre-ILK (minus G4X): a modeset that disables the pipe needs a post-wm update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* A modeset must not already have a shared DPLL assigned. */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate wm requires pipe wm to have been computed. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe)
			ret = skl_update_scaler_crtc(crtc_state);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, crtc,
							 crtc_state);
	}

	if (HAS_IPS(dev_priv))
		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);

	return ret;
}
12277
12278 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12279 {
12280         struct intel_connector *connector;
12281         struct drm_connector_list_iter conn_iter;
12282
12283         drm_connector_list_iter_begin(dev, &conn_iter);
12284         for_each_intel_connector_iter(connector, &conn_iter) {
12285                 if (connector->base.state->crtc)
12286                         drm_connector_put(&connector->base);
12287
12288                 if (connector->base.encoder) {
12289                         connector->base.state->best_encoder =
12290                                 connector->base.encoder;
12291                         connector->base.state->crtc =
12292                                 connector->base.encoder->crtc;
12293
12294                         drm_connector_get(&connector->base);
12295                 } else {
12296                         connector->base.state->best_encoder = NULL;
12297                         connector->base.state->crtc = NULL;
12298                 }
12299         }
12300         drm_connector_list_iter_end(&conn_iter);
12301 }
12302
12303 static int
12304 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12305                       struct intel_crtc_state *pipe_config)
12306 {
12307         struct drm_connector *connector = conn_state->connector;
12308         const struct drm_display_info *info = &connector->display_info;
12309         int bpp;
12310
12311         switch (conn_state->max_bpc) {
12312         case 6 ... 7:
12313                 bpp = 6 * 3;
12314                 break;
12315         case 8 ... 9:
12316                 bpp = 8 * 3;
12317                 break;
12318         case 10 ... 11:
12319                 bpp = 10 * 3;
12320                 break;
12321         case 12:
12322                 bpp = 12 * 3;
12323                 break;
12324         default:
12325                 return -EINVAL;
12326         }
12327
12328         if (bpp < pipe_config->pipe_bpp) {
12329                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12330                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12331                               connector->base.id, connector->name,
12332                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
12333                               pipe_config->pipe_bpp);
12334
12335                 pipe_config->pipe_bpp = bpp;
12336         }
12337
12338         return 0;
12339 }
12340
12341 static int
12342 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12343                           struct intel_crtc_state *pipe_config)
12344 {
12345         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12346         struct drm_atomic_state *state = pipe_config->uapi.state;
12347         struct drm_connector *connector;
12348         struct drm_connector_state *connector_state;
12349         int bpp, i;
12350
12351         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12352             IS_CHERRYVIEW(dev_priv)))
12353                 bpp = 10*3;
12354         else if (INTEL_GEN(dev_priv) >= 5)
12355                 bpp = 12*3;
12356         else
12357                 bpp = 8*3;
12358
12359         pipe_config->pipe_bpp = bpp;
12360
12361         /* Clamp display bpp to connector max bpp */
12362         for_each_new_connector_in_state(state, connector, connector_state, i) {
12363                 int ret;
12364
12365                 if (connector_state->crtc != &crtc->base)
12366                         continue;
12367
12368                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12369                 if (ret)
12370                         return ret;
12371         }
12372
12373         return 0;
12374 }
12375
/* Log the hardware (crtc_*) timings of @mode at KMS debug level. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
12387
/*
 * Log the link M/N values @m_n for the link named @id using @lane_count
 * lanes. NOTE(review): @pipe_config is currently unused in the body.
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
12398
/*
 * Log the contents of @frame at KERN_DEBUG; skipped entirely unless
 * KMS debugging output is enabled.
 */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
12408
/* Expands to a designated initializer mapping INTEL_OUTPUT_x to "x". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for the INTEL_OUTPUT_* enum, indexed by type. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12427
12428 static void snprintf_output_types(char *buf, size_t len,
12429                                   unsigned int output_types)
12430 {
12431         char *str = buf;
12432         int i;
12433
12434         str[0] = '\0';
12435
12436         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12437                 int r;
12438
12439                 if ((output_types & BIT(i)) == 0)
12440                         continue;
12441
12442                 r = snprintf(str, len, "%s%s",
12443                              str != buf ? "," : "", output_type_str[i]);
12444                 if (r >= len)
12445                         break;
12446                 str += r;
12447                 len -= r;
12448
12449                 output_types &= ~BIT(i);
12450         }
12451
12452         WARN_ON_ONCE(output_types != 0);
12453 }
12454
/* Human-readable names for enum intel_output_format, indexed by format. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12461
12462 static const char *output_formats(enum intel_output_format format)
12463 {
12464         if (format >= ARRAY_SIZE(output_format_str))
12465                 format = INTEL_OUTPUT_FORMAT_INVALID;
12466         return output_format_str[format];
12467 }
12468
/*
 * Log the given plane state (framebuffer, format, visibility, rotation,
 * scaler and src/dst rectangles) at KMS debug level.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->uapi.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->uapi.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->hw.rotation, plane_state->scaler_id);
	/* src is 16.16 fixed point, dst is integer pixels. */
	if (plane_state->uapi.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
			      DRM_RECT_ARG(&plane_state->uapi.dst));
}
12494
/*
 * Dump the full CRTC state to the kernel log at KMS debug level.
 *
 * @context identifies the caller in the output. When @state is non-NULL,
 * the states of all planes on this CRTC's pipe are dumped as well. For a
 * disabled pipe only the planes are dumped.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->hw.enable), context);

	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->hw.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	/* FDI link M/N, only relevant with a PCH encoder. */
	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump each infoframe type that is enabled in this state. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* Panel fitter: GMCH platforms vs PCH-based platforms. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has a CGM unit; other platforms have the CSC mode register. */
	if (IS_CHERRYVIEW(dev_priv))
		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->cgm_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->csc_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
12605
/*
 * Verify that no digital port is driven by more than one encoder, and
 * that MST is not mixed with SST/HDMI on the same port.
 *
 * Returns true when the configuration is conflict free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state from @state; fall back to current. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
12677
/*
 * Sync the parts of hw state that may change without a full modeset
 * (currently only the color management blobs) from the uapi state.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
12683
/*
 * Derive the hw state (what gets programmed into the hardware) from the
 * uapi state (what userspace requested) for a full modeset.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
12693
/*
 * Copy the hw state back into the uapi (drm core visible) state,
 * including the mode blob and the color management blobs.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Failure here is unexpected; warn rather than propagate. */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
12710
/*
 * Reset @crtc_state to a clean slate before recomputing it, preserving
 * only the fields that must survive: the uapi state, scaler state,
 * assigned DPLLs, CRC enable, GMCH watermarks and the port sync slave
 * bitmask. The hw state is then re-derived from the uapi state.
 *
 * Returns 0 on success, -ENOMEM if the temporary copy cannot be allocated.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc_state *saved_state;

	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* GMCH platforms track watermarks in the crtc state; keep them. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call.
	 */
	if (is_trans_port_sync_master(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
12755
/*
 * Compute the full pipe configuration for this CRTC: baseline bpp,
 * output types, encoder adjustments and the final CRTC timings.
 * Encoders may request one retry (RETRY) when bandwidth constrained.
 *
 * Returns 0 on success or a negative error code; -EDEADLK is propagated
 * unmodified for modeset lock back-off.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;

	/* Default the transcoder to the one matching the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Set the crtc_state defaults for trans_port_sync */
	pipe_config->master_transcoder = INVALID_TRANSCODER;
	ret = icl_add_sync_mode_crtcs(pipe_config);
	if (ret) {
		DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
			      ret);
		return ret;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is normal lock back-off; don't log it. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* At most one retry is allowed; warn on a second RETRY. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
12903
/*
 * Compare two clocks (in kHz) with some slack: they are considered equal
 * when their difference is below roughly 5% of their sum. Zero clocks
 * only match another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, diff;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	diff = abs(clock1 - clock2);

	return (diff + sum) * 100 / sum < 105;
}
12921
12922 static bool
12923 intel_compare_m_n(unsigned int m, unsigned int n,
12924                   unsigned int m2, unsigned int n2,
12925                   bool exact)
12926 {
12927         if (m == m2 && n == n2)
12928                 return true;
12929
12930         if (exact || !m || !n || !m2 || !n2)
12931                 return false;
12932
12933         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12934
12935         if (n > n2) {
12936                 while (n > n2) {
12937                         m2 <<= 1;
12938                         n2 <<= 1;
12939                 }
12940         } else if (n < n2) {
12941                 while (n < n2) {
12942                         m <<= 1;
12943                         n <<= 1;
12944                 }
12945         }
12946
12947         if (n != n2)
12948                 return false;
12949
12950         return intel_fuzzy_clock_check(m, m2);
12951 }
12952
12953 static bool
12954 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12955                        const struct intel_link_m_n *m2_n2,
12956                        bool exact)
12957 {
12958         return m_n->tu == m2_n2->tu &&
12959                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12960                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12961                 intel_compare_m_n(m_n->link_m, m_n->link_n,
12962                                   m2_n2->link_m, m2_n2->link_n, exact);
12963 }
12964
12965 static bool
12966 intel_compare_infoframe(const union hdmi_infoframe *a,
12967                         const union hdmi_infoframe *b)
12968 {
12969         return memcmp(a, b, sizeof(*a)) == 0;
12970 }
12971
/*
 * Report a mismatch between the expected (@a) and found (@b) infoframe
 * of type @name. During a fastset the report is debug-only (and skipped
 * entirely when KMS debugging is disabled); otherwise it is an error.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		if ((drm_debug & DRM_UT_KMS) == 0)
			return;

		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
		DRM_DEBUG_KMS("expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		DRM_DEBUG_KMS("found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		DRM_ERROR("mismatch in %s infoframe\n", name);
		DRM_ERROR("expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		DRM_ERROR("found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
12995
/*
 * Report a single pipe config property mismatch for @crtc. @name is the
 * property, @format/... describe expected vs. found values (printed via
 * the %pV va_format mechanism). Fastset mismatches are logged at KMS
 * debug level, others as driver errors.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
                     const char *name, const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;

        if (fastset)
                DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
                              crtc->base.base.id, crtc->base.name, name, &vaf);
        else
                DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
                          crtc->base.base.id, crtc->base.name, name, &vaf);

        va_end(args);
}
13016
13017 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13018 {
13019         if (i915_modparams.fastboot != -1)
13020                 return i915_modparams.fastboot;
13021
13022         /* Enable fastboot by default on Skylake and newer */
13023         if (INTEL_GEN(dev_priv) >= 9)
13024                 return true;
13025
13026         /* Enable fastboot by default on VLV and CHV */
13027         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13028                 return true;
13029
13030         /* Disabled by default on all others */
13031         return false;
13032 }
13033
13034 static bool
13035 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
13036                           const struct intel_crtc_state *pipe_config,
13037                           bool fastset)
13038 {
13039         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
13040         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
13041         bool ret = true;
13042         u32 bp_gamma = 0;
13043         bool fixup_inherited = fastset &&
13044                 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
13045                 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
13046
13047         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
13048                 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
13049                 ret = false;
13050         }
13051
13052 #define PIPE_CONF_CHECK_X(name) do { \
13053         if (current_config->name != pipe_config->name) { \
13054                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13055                                      "(expected 0x%08x, found 0x%08x)", \
13056                                      current_config->name, \
13057                                      pipe_config->name); \
13058                 ret = false; \
13059         } \
13060 } while (0)
13061
13062 #define PIPE_CONF_CHECK_I(name) do { \
13063         if (current_config->name != pipe_config->name) { \
13064                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13065                                      "(expected %i, found %i)", \
13066                                      current_config->name, \
13067                                      pipe_config->name); \
13068                 ret = false; \
13069         } \
13070 } while (0)
13071
13072 #define PIPE_CONF_CHECK_BOOL(name) do { \
13073         if (current_config->name != pipe_config->name) { \
13074                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
13075                                      "(expected %s, found %s)", \
13076                                      yesno(current_config->name), \
13077                                      yesno(pipe_config->name)); \
13078                 ret = false; \
13079         } \
13080 } while (0)
13081
13082 /*
13083  * Checks state where we only read out the enabling, but not the entire
13084  * state itself (like full infoframes or ELD for audio). These states
13085  * require a full modeset on bootup to fix up.
13086  */
13087 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13088         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13089                 PIPE_CONF_CHECK_BOOL(name); \
13090         } else { \
13091                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13092                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13093                                      yesno(current_config->name), \
13094                                      yesno(pipe_config->name)); \
13095                 ret = false; \
13096         } \
13097 } while (0)
13098
13099 #define PIPE_CONF_CHECK_P(name) do { \
13100         if (current_config->name != pipe_config->name) { \
13101                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13102                                      "(expected %p, found %p)", \
13103                                      current_config->name, \
13104                                      pipe_config->name); \
13105                 ret = false; \
13106         } \
13107 } while (0)
13108
13109 #define PIPE_CONF_CHECK_M_N(name) do { \
13110         if (!intel_compare_link_m_n(&current_config->name, \
13111                                     &pipe_config->name,\
13112                                     !fastset)) { \
13113                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13114                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13115                                      "found tu %i, gmch %i/%i link %i/%i)", \
13116                                      current_config->name.tu, \
13117                                      current_config->name.gmch_m, \
13118                                      current_config->name.gmch_n, \
13119                                      current_config->name.link_m, \
13120                                      current_config->name.link_n, \
13121                                      pipe_config->name.tu, \
13122                                      pipe_config->name.gmch_m, \
13123                                      pipe_config->name.gmch_n, \
13124                                      pipe_config->name.link_m, \
13125                                      pipe_config->name.link_n); \
13126                 ret = false; \
13127         } \
13128 } while (0)
13129
13130 /* This is required for BDW+ where there is only one set of registers for
13131  * switching between high and low RR.
13132  * This macro can be used whenever a comparison has to be made between one
13133  * hw state and multiple sw state variables.
13134  */
13135 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13136         if (!intel_compare_link_m_n(&current_config->name, \
13137                                     &pipe_config->name, !fastset) && \
13138             !intel_compare_link_m_n(&current_config->alt_name, \
13139                                     &pipe_config->name, !fastset)) { \
13140                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13141                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13142                                      "or tu %i gmch %i/%i link %i/%i, " \
13143                                      "found tu %i, gmch %i/%i link %i/%i)", \
13144                                      current_config->name.tu, \
13145                                      current_config->name.gmch_m, \
13146                                      current_config->name.gmch_n, \
13147                                      current_config->name.link_m, \
13148                                      current_config->name.link_n, \
13149                                      current_config->alt_name.tu, \
13150                                      current_config->alt_name.gmch_m, \
13151                                      current_config->alt_name.gmch_n, \
13152                                      current_config->alt_name.link_m, \
13153                                      current_config->alt_name.link_n, \
13154                                      pipe_config->name.tu, \
13155                                      pipe_config->name.gmch_m, \
13156                                      pipe_config->name.gmch_n, \
13157                                      pipe_config->name.link_m, \
13158                                      pipe_config->name.link_n); \
13159                 ret = false; \
13160         } \
13161 } while (0)
13162
13163 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13164         if ((current_config->name ^ pipe_config->name) & (mask)) { \
13165                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13166                                      "(%x) (expected %i, found %i)", \
13167                                      (mask), \
13168                                      current_config->name & (mask), \
13169                                      pipe_config->name & (mask)); \
13170                 ret = false; \
13171         } \
13172 } while (0)
13173
13174 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13175         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13176                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13177                                      "(expected %i, found %i)", \
13178                                      current_config->name, \
13179                                      pipe_config->name); \
13180                 ret = false; \
13181         } \
13182 } while (0)
13183
13184 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13185         if (!intel_compare_infoframe(&current_config->infoframes.name, \
13186                                      &pipe_config->infoframes.name)) { \
13187                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13188                                                &current_config->infoframes.name, \
13189                                                &pipe_config->infoframes.name); \
13190                 ret = false; \
13191         } \
13192 } while (0)
13193
13194 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13195         if (current_config->name1 != pipe_config->name1) { \
13196                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13197                                 "(expected %i, found %i, won't compare lut values)", \
13198                                 current_config->name1, \
13199                                 pipe_config->name1); \
13200                 ret = false;\
13201         } else { \
13202                 if (!intel_color_lut_equal(current_config->name2, \
13203                                         pipe_config->name2, pipe_config->name1, \
13204                                         bit_precision)) { \
13205                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13206                                         "hw_state doesn't match sw_state"); \
13207                         ret = false; \
13208                 } \
13209         } \
13210 } while (0)
13211
13212 #define PIPE_CONF_QUIRK(quirk) \
13213         ((current_config->quirks | pipe_config->quirks) & (quirk))
13214
13215         PIPE_CONF_CHECK_I(cpu_transcoder);
13216
13217         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13218         PIPE_CONF_CHECK_I(fdi_lanes);
13219         PIPE_CONF_CHECK_M_N(fdi_m_n);
13220
13221         PIPE_CONF_CHECK_I(lane_count);
13222         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13223
13224         if (INTEL_GEN(dev_priv) < 8) {
13225                 PIPE_CONF_CHECK_M_N(dp_m_n);
13226
13227                 if (current_config->has_drrs)
13228                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
13229         } else
13230                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13231
13232         PIPE_CONF_CHECK_X(output_types);
13233
13234         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13235         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13236         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13237         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13238         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13239         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13240
13241         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13242         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13243         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13244         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13245         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13246         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13247
13248         PIPE_CONF_CHECK_I(pixel_multiplier);
13249         PIPE_CONF_CHECK_I(output_format);
13250         PIPE_CONF_CHECK_I(dc3co_exitline);
13251         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13252         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13253             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13254                 PIPE_CONF_CHECK_BOOL(limited_color_range);
13255
13256         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13257         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13258         PIPE_CONF_CHECK_BOOL(has_infoframe);
13259         PIPE_CONF_CHECK_BOOL(fec_enable);
13260
13261         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13262
13263         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13264                               DRM_MODE_FLAG_INTERLACE);
13265
13266         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13267                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13268                                       DRM_MODE_FLAG_PHSYNC);
13269                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13270                                       DRM_MODE_FLAG_NHSYNC);
13271                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13272                                       DRM_MODE_FLAG_PVSYNC);
13273                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13274                                       DRM_MODE_FLAG_NVSYNC);
13275         }
13276
13277         PIPE_CONF_CHECK_X(gmch_pfit.control);
13278         /* pfit ratios are autocomputed by the hw on gen4+ */
13279         if (INTEL_GEN(dev_priv) < 4)
13280                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13281         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13282
13283         /*
13284          * Changing the EDP transcoder input mux
13285          * (A_ONOFF vs. A_ON) requires a full modeset.
13286          */
13287         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13288
13289         if (!fastset) {
13290                 PIPE_CONF_CHECK_I(pipe_src_w);
13291                 PIPE_CONF_CHECK_I(pipe_src_h);
13292
13293                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13294                 if (current_config->pch_pfit.enabled) {
13295                         PIPE_CONF_CHECK_X(pch_pfit.pos);
13296                         PIPE_CONF_CHECK_X(pch_pfit.size);
13297                 }
13298
13299                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13300                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13301
13302                 PIPE_CONF_CHECK_X(gamma_mode);
13303                 if (IS_CHERRYVIEW(dev_priv))
13304                         PIPE_CONF_CHECK_X(cgm_mode);
13305                 else
13306                         PIPE_CONF_CHECK_X(csc_mode);
13307                 PIPE_CONF_CHECK_BOOL(gamma_enable);
13308                 PIPE_CONF_CHECK_BOOL(csc_enable);
13309
13310                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13311                 if (bp_gamma)
13312                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13313
13314         }
13315
13316         PIPE_CONF_CHECK_BOOL(double_wide);
13317
13318         PIPE_CONF_CHECK_P(shared_dpll);
13319         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13320         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13321         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13322         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13323         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13324         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13325         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13326         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13327         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13328         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13329         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13330         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13331         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13332         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13333         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13334         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13335         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13336         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13337         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13338         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13339         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13340         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13341         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13342         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13343         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13344         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13345         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13346         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13347         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13348         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13349         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13350
13351         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13352         PIPE_CONF_CHECK_X(dsi_pll.div);
13353
13354         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13355                 PIPE_CONF_CHECK_I(pipe_bpp);
13356
13357         PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
13358         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13359
13360         PIPE_CONF_CHECK_I(min_voltage_level);
13361
13362         PIPE_CONF_CHECK_X(infoframes.enable);
13363         PIPE_CONF_CHECK_X(infoframes.gcp);
13364         PIPE_CONF_CHECK_INFOFRAME(avi);
13365         PIPE_CONF_CHECK_INFOFRAME(spd);
13366         PIPE_CONF_CHECK_INFOFRAME(hdmi);
13367         PIPE_CONF_CHECK_INFOFRAME(drm);
13368
13369         PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
13370         PIPE_CONF_CHECK_I(master_transcoder);
13371
13372 #undef PIPE_CONF_CHECK_X
13373 #undef PIPE_CONF_CHECK_I
13374 #undef PIPE_CONF_CHECK_BOOL
13375 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13376 #undef PIPE_CONF_CHECK_P
13377 #undef PIPE_CONF_CHECK_FLAGS
13378 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13379 #undef PIPE_CONF_CHECK_COLOR_LUT
13380 #undef PIPE_CONF_QUIRK
13381
13382         return ret;
13383 }
13384
13385 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13386                                            const struct intel_crtc_state *pipe_config)
13387 {
13388         if (pipe_config->has_pch_encoder) {
13389                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13390                                                             &pipe_config->fdi_m_n);
13391                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13392
13393                 /*
13394                  * FDI already provided one idea for the dotclock.
13395                  * Yell if the encoder disagrees.
13396                  */
13397                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13398                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13399                      fdi_dotclock, dotclock);
13400         }
13401 }
13402
/*
 * Cross-check the skl+ watermark and DDB allocation software state in
 * @new_crtc_state against what the hardware has actually programmed for
 * @crtc, logging an error for every difference. No-op on pre-gen9
 * hardware and on inactive crtcs.
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Heap-allocated below to keep this large struct off the stack. */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_ddb_allocation ddb;
                struct skl_pipe_wm wm;
        } *hw;
        struct skl_ddb_allocation *sw_ddb;
        struct skl_pipe_wm *sw_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        const enum pipe pipe = crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
                return;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return;

        /* Read back the hw watermark and DDB state for this pipe. */
        skl_pipe_wm_get_hw_state(crtc, &hw->wm);
        sw_wm = &new_crtc_state->wm.skl.optimal;

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        skl_ddb_get_hw_state(dev_priv, &hw->ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;

        /* DBUF slice count only exists on gen11+. */
        if (INTEL_GEN(dev_priv) >= 11 &&
            hw->ddb.enabled_slices != sw_ddb->enabled_slices)
                DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
                          sw_ddb->enabled_slices,
                          hw->ddb.enabled_slices);

        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks: every level plus the transition WM. */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1, level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[plane];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated its ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check
         */
        if (1) {
                /*
                 * NOTE(review): "if (1)" looks like a leftover from a removed
                 * cursor-visibility condition (see comment above) - the block
                 * now always runs; confirm against git history.
                 */
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe),
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe),
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
13541
/*
 * Verify every connector in @state that is attached to @crtc: its state
 * must pass intel_connector_verify_state() and its atomic best_encoder
 * must agree with the legacy connector->encoder pointer.
 *
 * NOTE(review): "new_conn_state->crtc != &crtc->base" is evaluated before
 * the "if (crtc)" NULL guard below, so the guard only helps if taking the
 * address of a NULL crtc's first member yields NULL - confirm whether
 * callers outside this chunk ever pass crtc == NULL.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
                       struct intel_crtc *crtc)
{
        struct drm_connector *connector;
        struct drm_connector_state *new_conn_state;
        int i;

        for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
                struct drm_encoder *encoder = connector->encoder;
                struct intel_crtc_state *crtc_state = NULL;

                /* Only look at connectors assigned to this crtc. */
                if (new_conn_state->crtc != &crtc->base)
                        continue;

                if (crtc)
                        crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

                intel_connector_verify_state(crtc_state, new_conn_state);

                I915_STATE_WARN(new_conn_state->best_encoder != encoder,
                     "connector's atomic encoder doesn't match legacy encoder\n");
        }
}
13566
/*
 * Verify each encoder's bookkeeping against the connector states in
 * @state: an encoder referenced by any new connector state must have a
 * matching crtc pointer, and an encoder no connector points at must
 * report itself disabled in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                /*
                 * found: some connector referenced this encoder in either
                 * the old or new state (i.e. it's part of this commit).
                 * enabled: a connector uses it in the new state.
                 */
                bool enabled = false, found = false;
                enum pipe pipe;

                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
                              encoder->base.base.id,
                              encoder->base.name);

                for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                /* Encoder untouched by this commit - nothing to verify. */
                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                /* A detached encoder must also be off in hardware. */
                if (!encoder->base.crtc) {
                        bool active;

                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
13615
/*
 * Cross check the committed sw state of @crtc against the state read
 * back from the hardware. @old_crtc_state is no longer needed and is
 * reused as scratch space for the hw readout.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	bool active;

	/*
	 * Tear down old_crtc_state's embedded uapi/hw state properly
	 * before recycling its storage for the hw readout.
	 */
	state = old_crtc_state->uapi.state;
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);

	pipe_config = old_crtc_state;
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->uapi.crtc = &crtc->base;
	pipe_config->uapi.state = state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

	/* Read the current pipe configuration from the hardware. */
	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Every encoder on the crtc must agree on active state and pipe. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let active encoders fill in their part of the hw state. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Nothing further to compare when the crtc is meant to be off. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Full sw vs hw comparison; dump both states on mismatch. */
	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
13686
13687 static void
13688 intel_verify_planes(struct intel_atomic_state *state)
13689 {
13690         struct intel_plane *plane;
13691         const struct intel_plane_state *plane_state;
13692         int i;
13693
13694         for_each_new_intel_plane_in_state(state, plane,
13695                                           plane_state, i)
13696                 assert_plane(plane, plane_state->planar_slave ||
13697                              plane_state->uapi.visible);
13698 }
13699
/*
 * Cross check the sw tracking of a single shared DPLL against the
 * hardware state read back via its ->get_hw_state() hook. When @crtc is
 * NULL only the global reference-count consistency is checked (used for
 * plls that should have no users); otherwise the pll's active/enabled
 * masks must account for @crtc according to @new_crtc_state->hw.active.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls may legitimately run without any users. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* active_mask must always be a subset of state.crtc_mask. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* The crtc must (not) be in the active mask per its sw state. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* The sw-tracked hw state must match what was read back. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13754
13755 static void
13756 verify_shared_dpll_state(struct intel_crtc *crtc,
13757                          struct intel_crtc_state *old_crtc_state,
13758                          struct intel_crtc_state *new_crtc_state)
13759 {
13760         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13761
13762         if (new_crtc_state->shared_dpll)
13763                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13764
13765         if (old_crtc_state->shared_dpll &&
13766             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13767                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13768                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13769
13770                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13771                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13772                                 pipe_name(drm_crtc_index(&crtc->base)));
13773                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13774                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13775                                 pipe_name(drm_crtc_index(&crtc->base)));
13776         }
13777 }
13778
13779 static void
13780 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13781                           struct intel_atomic_state *state,
13782                           struct intel_crtc_state *old_crtc_state,
13783                           struct intel_crtc_state *new_crtc_state)
13784 {
13785         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13786                 return;
13787
13788         verify_wm_state(crtc, new_crtc_state);
13789         verify_connector_state(state, crtc);
13790         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13791         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13792 }
13793
13794 static void
13795 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13796 {
13797         int i;
13798
13799         for (i = 0; i < dev_priv->num_shared_dpll; i++)
13800                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13801 }
13802
/*
 * Cross check global state after crtcs have been disabled: encoder
 * routing, connectors with no crtc, and that no shared DPLL is left
 * referencing a disabled crtc.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
13811
/*
 * Recompute the vblank timestamping constants and the platform-specific
 * scanline counter offset from @crtc_state's adjusted mode. Both derive
 * from the active timings, so this must run whenever those change.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
13864
13865 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13866 {
13867         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13868         struct intel_crtc_state *new_crtc_state;
13869         struct intel_crtc *crtc;
13870         int i;
13871
13872         if (!dev_priv->display.crtc_compute_clock)
13873                 return;
13874
13875         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13876                 if (!needs_modeset(new_crtc_state))
13877                         continue;
13878
13879                 intel_release_shared_dplls(state, crtc);
13880         }
13881 }
13882
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Returns 0 on success, or a negative error code propagated from
 * intel_atomic_get_crtc_state().
 */
static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being enabled. */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* Pull every crtc into the state to reset its w/a pipe. */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * The newly enabled crtc must wait on whichever pipe was already
	 * running; failing that, the second new crtc waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13943
/*
 * Compute the modeset-wide derived state: the resulting active pipe
 * mask, the cdclk state, released shared DPLLs, and (on HSW) the
 * two-pipe plane enable workaround. Only called from
 * intel_atomic_check() when at least one crtc needs a full modeset.
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	/* Start from the current device-wide state ... */
	state->modeset = true;
	state->active_pipes = dev_priv->active_pipes;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;

	/* ... and fold in the active/inactive transitions of this commit. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->hw.active)
			state->active_pipes |= BIT(crtc->pipe);
		else
			state->active_pipes &= ~BIT(crtc->pipe);

		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
			state->active_pipe_changes |= BIT(crtc->pipe);
	}

	/* Changing the set of active pipes affects device-wide state. */
	if (state->active_pipe_changes) {
		ret = intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13988
13989 /*
13990  * Handle calculation of various watermark data at the end of the atomic check
13991  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13992  * handlers to ensure that all derived state has been updated.
13993  */
13994 static int calc_watermark_data(struct intel_atomic_state *state)
13995 {
13996         struct drm_device *dev = state->base.dev;
13997         struct drm_i915_private *dev_priv = to_i915(dev);
13998
13999         /* Is there platform-specific watermark information to calculate? */
14000         if (dev_priv->display.compute_global_watermarks)
14001                 return dev_priv->display.compute_global_watermarks(state);
14002
14003         return 0;
14004 }
14005
/*
 * If intel_pipe_config_compare() (in fastset mode) deems the old and
 * new states close enough, downgrade the pending full modeset to a
 * fastset: clear uapi.mode_changed and mark update_pipe instead.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		return;

	new_crtc_state->uapi.mode_changed = false;
	new_crtc_state->update_pipe = true;

	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
14028
14029 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14030                                           struct intel_crtc *crtc,
14031                                           u8 plane_ids_mask)
14032 {
14033         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14034         struct intel_plane *plane;
14035
14036         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14037                 struct intel_plane_state *plane_state;
14038
14039                 if ((plane_ids_mask & BIT(plane->id)) == 0)
14040                         continue;
14041
14042                 plane_state = intel_atomic_get_plane_state(state, plane);
14043                 if (IS_ERR(plane_state))
14044                         return PTR_ERR(plane_state);
14045         }
14046
14047         return 0;
14048 }
14049
14050 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14051 {
14052         /* See {hsw,vlv,ivb}_plane_ratio() */
14053         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14054                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14055                 IS_IVYBRIDGE(dev_priv);
14056 }
14057
/*
 * Run the per-plane atomic checks and compute the planes' minimum
 * cdclk. On platforms where the active plane count affects that
 * minimum, all planes of a crtc whose active (non-cursor) plane count
 * changed are first pulled into the state. *need_modeset is ORed with
 * the result of intel_plane_calc_min_cdclk() for each plane. Returns 0
 * on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_modeset)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane doesn't count towards the plane ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);

	return 0;
}
14118
14119 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14120 {
14121         struct intel_crtc_state *crtc_state;
14122         struct intel_crtc *crtc;
14123         int i;
14124
14125         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14126                 int ret = intel_crtc_atomic_check(state, crtc);
14127                 if (ret) {
14128                         DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
14129                                          crtc->base.base.id, crtc->base.name);
14130                         return ret;
14131                 }
14132         }
14133
14134         return 0;
14135 }
14136
14137 /**
14138  * intel_atomic_check - validate state object
14139  * @dev: drm device
14140  * @_state: state to validate
14141  */
14142 static int intel_atomic_check(struct drm_device *dev,
14143                               struct drm_atomic_state *_state)
14144 {
14145         struct drm_i915_private *dev_priv = to_i915(dev);
14146         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14147         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14148         struct intel_crtc *crtc;
14149         int ret, i;
14150         bool any_ms = false;
14151
14152         /* Catch I915_MODE_FLAG_INHERITED */
14153         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14154                                             new_crtc_state, i) {
14155                 if (new_crtc_state->hw.mode.private_flags !=
14156                     old_crtc_state->hw.mode.private_flags)
14157                         new_crtc_state->uapi.mode_changed = true;
14158         }
14159
14160         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14161         if (ret)
14162                 goto fail;
14163
14164         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14165                                             new_crtc_state, i) {
14166                 if (!needs_modeset(new_crtc_state)) {
14167                         /* Light copy */
14168                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14169
14170                         continue;
14171                 }
14172
14173                 if (!new_crtc_state->uapi.enable) {
14174                         intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
14175
14176                         any_ms = true;
14177                         continue;
14178                 }
14179
14180                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14181                 if (ret)
14182                         goto fail;
14183
14184                 ret = intel_modeset_pipe_config(new_crtc_state);
14185                 if (ret)
14186                         goto fail;
14187
14188                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14189
14190                 if (needs_modeset(new_crtc_state))
14191                         any_ms = true;
14192         }
14193
14194         if (any_ms && !check_digital_port_conflicts(state)) {
14195                 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
14196                 ret = EINVAL;
14197                 goto fail;
14198         }
14199
14200         ret = drm_dp_mst_atomic_check(&state->base);
14201         if (ret)
14202                 goto fail;
14203
14204         any_ms |= state->cdclk.force_min_cdclk_changed;
14205
14206         ret = intel_atomic_check_planes(state, &any_ms);
14207         if (ret)
14208                 goto fail;
14209
14210         if (any_ms) {
14211                 ret = intel_modeset_checks(state);
14212                 if (ret)
14213                         goto fail;
14214         } else {
14215                 state->cdclk.logical = dev_priv->cdclk.logical;
14216         }
14217
14218         ret = intel_atomic_check_crtcs(state);
14219         if (ret)
14220                 goto fail;
14221
14222         intel_fbc_choose_crtc(dev_priv, state);
14223         ret = calc_watermark_data(state);
14224         if (ret)
14225                 goto fail;
14226
14227         ret = intel_bw_atomic_check(state);
14228         if (ret)
14229                 goto fail;
14230
14231         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14232                                             new_crtc_state, i) {
14233                 if (!needs_modeset(new_crtc_state) &&
14234                     !new_crtc_state->update_pipe)
14235                         continue;
14236
14237                 intel_dump_pipe_config(new_crtc_state, state,
14238                                        needs_modeset(new_crtc_state) ?
14239                                        "[modeset]" : "[fastset]");
14240         }
14241
14242         return 0;
14243
14244  fail:
14245         if (ret == -EDEADLK)
14246                 return ret;
14247
14248         /*
14249          * FIXME would probably be nice to know which crtc specifically
14250          * caused the failure, in cases where we can pinpoint it.
14251          */
14252         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14253                                             new_crtc_state, i)
14254                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14255
14256         return ret;
14257 }
14258
/*
 * Thin wrapper around drm_atomic_helper_prepare_planes(), which runs
 * the planes' prepare_fb hooks before the commit. Returns 0 on success
 * or a negative error code.
 */
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(state->base.dev,
						&state->base);
}
14264
14265 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14266 {
14267         struct drm_device *dev = crtc->base.dev;
14268         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14269
14270         if (!vblank->max_vblank_count)
14271                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14272
14273         return crtc->base.funcs->get_vblank_counter(&crtc->base);
14274 }
14275
/*
 * Enable FIFO underrun reporting for @crtc and, if the crtc drives a
 * PCH encoder, for the corresponding PCH transcoder. CPU underrun
 * reporting is not enabled on gen2.
 */
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}
14291
/*
 * Apply the pipe-level pieces of a fastset: pipe source size, panel
 * fitter and (gen11+) the pipe chicken register.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Pre-gen9 pfit must be explicitly enabled or disabled. */
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
14324
/*
 * Commit the pipe-level configuration for a plane update. For full
 * modesets the pipe was already programmed when the crtc was enabled,
 * so only the watermarks are updated in that case.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc_state *old_crtc_state,
			       struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state,
							   new_crtc_state);
}
14355
/*
 * Commit the new state of a single CRTC: either a full modeset
 * (hardware enable sequence) or an update of an already running pipe
 * (fastset and/or plane update).  The plane/pipe register writes are
 * performed inside the vblank evasion critical section delimited by
 * intel_pipe_update_start()/intel_pipe_update_end().
 */
static void intel_update_crtc(struct intel_crtc *crtc,
                              struct intel_atomic_state *state,
                              struct intel_crtc_state *old_crtc_state,
                              struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        bool modeset = needs_modeset(new_crtc_state);
        struct intel_plane_state *new_plane_state =
                intel_atomic_get_new_plane_state(state,
                                                 to_intel_plane(crtc->base.primary));

        if (modeset) {
                intel_crtc_update_active_timings(new_crtc_state);

                dev_priv->display.crtc_enable(new_crtc_state, state);

                /* vblanks work again, re-enable pipe CRC. */
                intel_crtc_enable_pipe_crc(crtc);
        } else {
                /*
                 * Load LUTs ahead of the critical section when the
                 * state asks for preloading (preload_luts).
                 */
                if (new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);

                intel_pre_plane_update(old_crtc_state, new_crtc_state);

                if (new_crtc_state->update_pipe)
                        intel_encoders_update_pipe(state, crtc);
        }

        /* Turn FBC off for a fastset that disables it, else (re)enable. */
        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else if (new_plane_state)
                intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);

        commit_pipe_config(state, old_crtc_state, new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(state, crtc);
        else
                i9xx_update_planes_on_crtc(state, crtc);

        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14413
14414 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
14415 {
14416         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
14417         enum transcoder slave_transcoder;
14418
14419         WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
14420
14421         slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
14422         return intel_get_crtc_for_pipe(dev_priv,
14423                                        (enum pipe)slave_transcoder);
14424 }
14425
/*
 * Fully disable a CRTC that was active in the old state: planes,
 * pipe CRC, the pipe itself, FBC and its shared DPLL.  The ordering
 * matters — CRC must be stopped before the pipe (see comment below),
 * and underruns are polled afterwards since they don't always raise
 * interrupts.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                                          struct intel_crtc_state *old_crtc_state,
                                          struct intel_crtc_state *new_crtc_state,
                                          struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_disable_planes(state, crtc);

        /*
         * We need to disable pipe CRC before disabling the pipe,
         * or we race against vblank off.
         */
        intel_crtc_disable_pipe_crc(crtc);

        dev_priv->display.crtc_disable(old_crtc_state, state);
        crtc->active = false;
        intel_fbc_disable(crtc);
        intel_disable_shared_dpll(old_crtc_state);

        /*
         * Underruns don't always raise interrupts,
         * so check manually.
         */
        intel_check_cpu_fifo_underruns(dev_priv);
        intel_check_pch_fifo_underruns(dev_priv);

        /* FIXME unify this for all platforms */
        if (!new_crtc_state->hw.active &&
            !HAS_GMCH(dev_priv) &&
            dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state,
                                                     new_crtc_state);
}
14460
/*
 * Disable a transcoder port sync master/slave pair.  The slave is
 * torn down first: slave vblanks are masked until the master's, so
 * disabling the master first would leave the slave stuck.
 *
 * NOTE(review): the WARN_ON below does not bail out — a missing slave
 * state would still be dereferenced right after.  Presumably the
 * atomic check phase guarantees both states are in @state; confirm.
 */
static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
                                                   struct intel_crtc *crtc,
                                                   struct intel_crtc_state *old_crtc_state,
                                                   struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
        struct intel_crtc_state *new_slave_crtc_state =
                intel_atomic_get_new_crtc_state(state, slave_crtc);
        struct intel_crtc_state *old_slave_crtc_state =
                intel_atomic_get_old_crtc_state(state, slave_crtc);

        WARN_ON(!slave_crtc || !new_slave_crtc_state ||
                !old_slave_crtc_state);

        /* Disable Slave first */
        intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
        if (old_slave_crtc_state->hw.active)
                intel_old_crtc_state_disables(state,
                                              old_slave_crtc_state,
                                              new_slave_crtc_state,
                                              slave_crtc);

        /* Disable Master */
        intel_pre_plane_update(old_crtc_state, new_crtc_state);
        if (old_crtc_state->hw.active)
                intel_old_crtc_state_disables(state,
                                              old_crtc_state,
                                              new_crtc_state,
                                              crtc);
}
14491
/*
 * Disable phase of an atomic commit: tear down every CRTC that needs
 * a full modeset, honouring master/slave ordering constraints.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        int i;

        /*
         * Disable CRTCs/pipes in reverse order because some features
         * (MST in TGL+) require a master/slave relationship between
         * pipes: the lowest pipe is always picked as the master, it is
         * enabled first, so disabling in reverse order makes the
         * master the last one to be disabled.
         */
        for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
                                                    new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state))
                        continue;

                /* In case of Transcoder port Sync master slave CRTCs can be
                 * assigned in any order and we need to make sure that
                 * slave CRTCs are disabled first and then master CRTC since
                 * Slave vblanks are masked till Master Vblanks.
                 */
                if (is_trans_port_sync_mode(new_crtc_state)) {
                        /* Slaves are handled together with their master. */
                        if (is_trans_port_sync_master(new_crtc_state))
                                intel_trans_port_sync_modeset_disables(state,
                                                                       crtc,
                                                                       old_crtc_state,
                                                                       new_crtc_state);
                        else
                                continue;
                } else {
                        intel_pre_plane_update(old_crtc_state, new_crtc_state);

                        if (old_crtc_state->hw.active)
                                intel_old_crtc_state_disables(state,
                                                              old_crtc_state,
                                                              new_crtc_state,
                                                              crtc);
                }
        }
}
14534
14535 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14536 {
14537         struct intel_crtc *crtc;
14538         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14539         int i;
14540
14541         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14542                 if (!new_crtc_state->hw.active)
14543                         continue;
14544
14545                 intel_update_crtc(crtc, state, old_crtc_state,
14546                                   new_crtc_state);
14547         }
14548 }
14549
14550 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
14551                                               struct intel_atomic_state *state,
14552                                               struct intel_crtc_state *new_crtc_state)
14553 {
14554         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14555
14556         intel_crtc_update_active_timings(new_crtc_state);
14557         dev_priv->display.crtc_enable(new_crtc_state, state);
14558         intel_crtc_enable_pipe_crc(crtc);
14559 }
14560
14561 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
14562                                        struct intel_atomic_state *state)
14563 {
14564         struct drm_connector *uninitialized_var(conn);
14565         struct drm_connector_state *conn_state;
14566         struct intel_dp *intel_dp;
14567         int i;
14568
14569         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
14570                 if (conn_state->crtc == &crtc->base)
14571                         break;
14572         }
14573         intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
14574         intel_dp_stop_link_train(intel_dp);
14575 }
14576
/*
 * Post-enable updates for a CRTC brought up via the transcoder port
 * sync path: FBC handling, pipe config commit and plane updates under
 * vblank evasion, and first-fastset FIFO underrun arming.
 *
 * Unlike intel_update_crtc(), this always takes the skl plane update
 * path — its only caller is the port sync code for recent platforms.
 */
static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
                                           struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct intel_plane_state *new_plane_state =
                intel_atomic_get_new_plane_state(state,
                                                 to_intel_plane(crtc->base.primary));
        bool modeset = needs_modeset(new_crtc_state);

        /* Turn FBC off for a fastset that disables it, else (re)enable. */
        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else if (new_plane_state)
                intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);
        commit_pipe_config(state, old_crtc_state, new_crtc_state);
        skl_update_planes_on_crtc(state, crtc);
        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14610
/*
 * Enable a transcoder port sync master/slave pair in the required
 * order: both pipes are brought up with DP_TP_CTL left in Idle, then
 * the slave's and finally the master's DP_TP_CTL is switched to
 * Normal, followed by the post-enable updates for both.
 *
 * NOTE(review): as in the disable path, the WARN_ON does not bail out
 * before the slave state pointers are used.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
                                               struct intel_atomic_state *state,
                                               struct intel_crtc_state *old_crtc_state,
                                               struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
        struct intel_crtc_state *new_slave_crtc_state =
                intel_atomic_get_new_crtc_state(state, slave_crtc);
        struct intel_crtc_state *old_slave_crtc_state =
                intel_atomic_get_old_crtc_state(state, slave_crtc);

        WARN_ON(!slave_crtc || !new_slave_crtc_state ||
                !old_slave_crtc_state);

        DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
                      crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
                      slave_crtc->base.name);

        /* Enable seq for slave with DP_TP_CTL left Idle until the
         * master is ready
         */
        intel_crtc_enable_trans_port_sync(slave_crtc,
                                          state,
                                          new_slave_crtc_state);

        /* Enable seq for master with DP_TP_CTL left Idle */
        intel_crtc_enable_trans_port_sync(crtc,
                                          state,
                                          new_crtc_state);

        /* Set Slave's DP_TP_CTL to Normal */
        intel_set_dp_tp_ctl_normal(slave_crtc,
                                   state);

        /* Set Master's DP_TP_CTL To Normal */
        usleep_range(200, 400);
        intel_set_dp_tp_ctl_normal(crtc,
                                   state);

        /* Now do the post crtc enable for all master and slaves */
        intel_post_crtc_enable_updates(slave_crtc,
                                       state);
        intel_post_crtc_enable_updates(crtc,
                                       state);
}
14656
/*
 * Gen9+ (skl) enable phase of an atomic commit.
 *
 * DDB (data buffer) allocations of different pipes must never overlap
 * at any instant, so pipes cannot simply be updated in order: each
 * iteration only updates pipes whose new allocation does not collide
 * with any other pipe's *current* allocation (tracked in entries[]),
 * and the outer loop repeats until no further progress is possible.
 * A second DBuf slice is enabled up front if needed and trimmed again
 * at the end.
 *
 * Note on indexing: entries[] is indexed by the atomic-state index i,
 * while the 'updated' bitmask is keyed by pipe.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        unsigned int updated = 0;
        bool progress;
        int i;
        u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        u8 required_slices = state->wm_results.ddb.enabled_slices;
        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
                /* ignore allocations for crtc's that have been turned off. */
                if (new_crtc_state->hw.active)
                        entries[i] = old_crtc_state->wm.skl.ddb;

        /* If 2nd DBuf slice required, enable it here */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other in between CRTC updates. Otherwise
         * we'll cause pipe underruns and other bad stuff.
         */
        do {
                progress = false;

                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        enum pipe pipe = crtc->pipe;
                        bool vbl_wait = false;
                        bool modeset = needs_modeset(new_crtc_state);

                        if (updated & BIT(crtc->pipe) || !new_crtc_state->hw.active)
                                continue;

                        /* Defer this pipe while its new DDB would collide. */
                        if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                        entries,
                                                        INTEL_NUM_PIPES(dev_priv), i))
                                continue;

                        updated |= BIT(pipe);
                        entries[i] = new_crtc_state->wm.skl.ddb;

                        /*
                         * If this is an already active pipe, its DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
                                                 &old_crtc_state->wm.skl.ddb) &&
                            !modeset &&
                            state->wm_results.dirty_pipes != updated)
                                vbl_wait = true;

                        /* Port sync slaves are enabled with their master. */
                        if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
                                if (is_trans_port_sync_master(new_crtc_state))
                                        intel_update_trans_port_sync_crtcs(crtc,
                                                                           state,
                                                                           old_crtc_state,
                                                                           new_crtc_state);
                                else
                                        continue;
                        } else {
                                intel_update_crtc(crtc, state, old_crtc_state,
                                                  new_crtc_state);
                        }

                        if (vbl_wait)
                                intel_wait_for_vblank(dev_priv, pipe);

                        progress = true;
                }
        } while (progress);

        /* If 2nd DBuf slice is no more required disable it */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);
}
14739
14740 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
14741 {
14742         struct intel_atomic_state *state, *next;
14743         struct llist_node *freed;
14744
14745         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
14746         llist_for_each_entry_safe(state, next, freed, freed)
14747                 drm_atomic_state_put(&state->base);
14748 }
14749
14750 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
14751 {
14752         struct drm_i915_private *dev_priv =
14753                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
14754
14755         intel_atomic_helper_free_state(dev_priv);
14756 }
14757
/*
 * Wait until the commit_ready fence has signalled, while also waking
 * up if a modeset-affecting GPU reset (I915_RESET_MODESET) is flagged,
 * by sitting on both wait queues at once.  Returns as soon as either
 * condition holds.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                /* Arm both wait entries before checking the conditions. */
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                              I915_RESET_MODESET),
                                &wait_reset, TASK_UNINTERRUPTIBLE);


                if (i915_sw_fence_done(&intel_state->commit_ready) ||
                    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                  I915_RESET_MODESET),
                    &wait_reset);
}
14784
14785 static void intel_atomic_cleanup_work(struct work_struct *work)
14786 {
14787         struct drm_atomic_state *state =
14788                 container_of(work, struct drm_atomic_state, commit_work);
14789         struct drm_i915_private *i915 = to_i915(state->dev);
14790
14791         drm_atomic_helper_cleanup_planes(&i915->drm, state);
14792         drm_atomic_helper_commit_cleanup_done(state);
14793         drm_atomic_state_put(state);
14794
14795         intel_atomic_helper_free_state(i915);
14796 }
14797
14798 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
14799 {
14800         struct drm_device *dev = state->base.dev;
14801         struct drm_i915_private *dev_priv = to_i915(dev);
14802         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
14803         struct intel_crtc *crtc;
14804         u64 put_domains[I915_MAX_PIPES] = {};
14805         intel_wakeref_t wakeref = 0;
14806         int i;
14807
14808         intel_atomic_commit_fence_wait(state);
14809
14810         drm_atomic_helper_wait_for_dependencies(&state->base);
14811
14812         if (state->modeset)
14813                 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
14814
14815         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14816                                             new_crtc_state, i) {
14817                 if (needs_modeset(new_crtc_state) ||
14818                     new_crtc_state->update_pipe) {
14819
14820                         put_domains[crtc->pipe] =
14821                                 modeset_get_crtc_power_domains(new_crtc_state);
14822                 }
14823         }
14824
14825         intel_commit_modeset_disables(state);
14826
14827         /* FIXME: Eventually get rid of our crtc->config pointer */
14828         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
14829                 crtc->config = new_crtc_state;
14830
14831         if (state->modeset) {
14832                 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
14833
14834                 intel_set_cdclk_pre_plane_update(dev_priv,
14835                                                  &state->cdclk.actual,
14836                                                  &dev_priv->cdclk.actual,
14837                                                  state->cdclk.pipe);
14838
14839                 /*
14840                  * SKL workaround: bspec recommends we disable the SAGV when we
14841                  * have more then one pipe enabled
14842                  */
14843                 if (!intel_can_enable_sagv(state))
14844                         intel_disable_sagv(dev_priv);
14845
14846                 intel_modeset_verify_disabled(dev_priv, state);
14847         }
14848
14849         /* Complete the events for pipes that have now been disabled */
14850         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14851                 bool modeset = needs_modeset(new_crtc_state);
14852
14853                 /* Complete events for now disable pipes here. */
14854                 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
14855                         spin_lock_irq(&dev->event_lock);
14856                         drm_crtc_send_vblank_event(&crtc->base,
14857                                                    new_crtc_state->uapi.event);
14858                         spin_unlock_irq(&dev->event_lock);
14859
14860                         new_crtc_state->uapi.event = NULL;
14861                 }
14862         }
14863
14864         if (state->modeset)
14865                 intel_encoders_update_prepare(state);
14866
14867         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
14868         dev_priv->display.commit_modeset_enables(state);
14869
14870         if (state->modeset) {
14871                 intel_encoders_update_complete(state);
14872
14873                 intel_set_cdclk_post_plane_update(dev_priv,
14874                                                   &state->cdclk.actual,
14875                                                   &dev_priv->cdclk.actual,
14876                                                   state->cdclk.pipe);
14877         }
14878
14879         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
14880          * already, but still need the state for the delayed optimization. To
14881          * fix this:
14882          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
14883          * - schedule that vblank worker _before_ calling hw_done
14884          * - at the start of commit_tail, cancel it _synchrously
14885          * - switch over to the vblank wait helper in the core after that since
14886          *   we don't need out special handling any more.
14887          */
14888         drm_atomic_helper_wait_for_flip_done(dev, &state->base);
14889
14890         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14891                 if (new_crtc_state->hw.active &&
14892                     !needs_modeset(new_crtc_state) &&
14893                     !new_crtc_state->preload_luts &&
14894                     (new_crtc_state->uapi.color_mgmt_changed ||
14895                      new_crtc_state->update_pipe))
14896                         intel_color_load_luts(new_crtc_state);
14897         }
14898
14899         /*
14900          * Now that the vblank has passed, we can go ahead and program the
14901          * optimal watermarks on platforms that need two-step watermark
14902          * programming.
14903          *
14904          * TODO: Move this (and other cleanup) to an async worker eventually.
14905          */
14906         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14907                 if (dev_priv->display.optimize_watermarks)
14908                         dev_priv->display.optimize_watermarks(state,
14909                                                               new_crtc_state);
14910         }
14911
14912         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14913                 intel_post_plane_update(old_crtc_state);
14914
14915                 if (put_domains[i])
14916                         modeset_put_power_domains(dev_priv, put_domains[i]);
14917
14918                 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
14919         }
14920
14921         if (state->modeset)
14922                 intel_verify_planes(state);
14923
14924         if (state->modeset && intel_can_enable_sagv(state))
14925                 intel_enable_sagv(dev_priv);
14926
14927         drm_atomic_helper_commit_hw_done(&state->base);
14928
14929         if (state->modeset) {
14930                 /* As one of the primary mmio accessors, KMS has a high
14931                  * likelihood of triggering bugs in unclaimed access. After we
14932                  * finish modesetting, see if an error has been flagged, and if
14933                  * so enable debugging for the next modeset - and hope we catch
14934                  * the culprit.
14935                  */
14936                 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
14937                 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
14938         }
14939         intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14940
14941         /*
14942          * Defer the cleanup of the old state to a separate worker to not
14943          * impede the current task (userspace for blocking modesets) that
14944          * are executed inline. For out-of-line asynchronous modesets/flips,
14945          * deferring to a new worker seems overkill, but we would place a
14946          * schedule point (cond_resched()) here anyway to keep latencies
14947          * down.
14948          */
14949         INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
14950         queue_work(system_highpri_wq, &state->base.commit_work);
14951 }
14952
14953 static void intel_atomic_commit_work(struct work_struct *work)
14954 {
14955         struct intel_atomic_state *state =
14956                 container_of(work, struct intel_atomic_state, base.commit_work);
14957
14958         intel_atomic_commit_tail(state);
14959 }
14960
14961 static int __i915_sw_fence_call
14962 intel_atomic_commit_ready(struct i915_sw_fence *fence,
14963                           enum i915_sw_fence_notify notify)
14964 {
14965         struct intel_atomic_state *state =
14966                 container_of(fence, struct intel_atomic_state, commit_ready);
14967
14968         switch (notify) {
14969         case FENCE_COMPLETE:
14970                 /* we do blocking waits in the worker, nothing to do here */
14971                 break;
14972         case FENCE_FREE:
14973                 {
14974                         struct intel_atomic_helper *helper =
14975                                 &to_i915(state->base.dev)->atomic_helper;
14976
14977                         if (llist_add(&state->freed, &helper->free_list))
14978                                 schedule_work(&helper->free_work);
14979                         break;
14980                 }
14981         }
14982
14983         return NOTIFY_DONE;
14984 }
14985
14986 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
14987 {
14988         struct intel_plane_state *old_plane_state, *new_plane_state;
14989         struct intel_plane *plane;
14990         int i;
14991
14992         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
14993                                              new_plane_state, i)
14994                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
14995                                         to_intel_frontbuffer(new_plane_state->hw.fb),
14996                                         plane->frontbuffer_bit);
14997 }
14998
14999 static void assert_global_state_locked(struct drm_i915_private *dev_priv)
15000 {
15001         struct intel_crtc *crtc;
15002
15003         for_each_intel_crtc(&dev_priv->drm, crtc)
15004                 drm_modeset_lock_assert_held(&crtc->base.mutex);
15005 }
15006
/*
 * Top-level i915 atomic commit entry point.
 *
 * Takes a runtime PM wakeref for the duration of the commit, prepares the
 * state, swaps it in, publishes any global (non-crtc) state, and then either
 * queues the commit tail to a workqueue (nonblocking) or runs it inline.
 *
 * Returns 0 on success or a negative error code; on failure the state
 * reference and wakeref taken here are dropped again before returning.
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
{
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        /*
         * Hold a state reference across the commit; commit_ready collects
         * all the fences/reservations the commit must wait on before the
         * hardware is touched (see intel_prepare_plane_fb()).
         */
        drm_atomic_state_get(&state->base);
        i915_sw_fence_init(&state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->base.legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(state);
        if (ret) {
                DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
                /* Commit the fence so waiters are not left hanging. */
                i915_sw_fence_commit(&state->commit_ready);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }

        ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(&state->base, true);

        if (ret) {
                i915_sw_fence_commit(&state->commit_ready);

                drm_atomic_helper_cleanup_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }
        /* Point of no return: the new state has been swapped in. */
        dev_priv->wm.distrust_bios_wm = false;
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        /*
         * Publish device-wide state; requires every crtc lock, which
         * global-state-changing commits must already hold.
         */
        if (state->global_state_changed) {
                assert_global_state_locked(dev_priv);

                memcpy(dev_priv->min_cdclk, state->min_cdclk,
                       sizeof(state->min_cdclk));
                memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
                       sizeof(state->min_voltage_level));
                dev_priv->active_pipes = state->active_pipes;
                dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;

                intel_cdclk_swap_state(state);
        }

        /* Second reference: consumed by the commit tail / work item. */
        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&state->commit_ready);
        if (nonblock && state->modeset) {
                queue_work(dev_priv->modeset_wq, &state->base.commit_work);
        } else if (nonblock) {
                queue_work(dev_priv->flip_wq, &state->base.commit_work);
        } else {
                /* Blocking modesets must not overtake queued ones. */
                if (state->modeset)
                        flush_workqueue(dev_priv->modeset_wq);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
15101
/*
 * Vblank wait-queue entry used to boost the GPU clocks if the request
 * backing a flip has not started running by the time the vblank fires.
 */
struct wait_rps_boost {
        struct wait_queue_entry wait; /* hooked onto the crtc vblank waitqueue */

        struct drm_crtc *crtc;        /* crtc whose vblank reference we hold */
        struct i915_request *request; /* request we may boost; holds a reference */
};
15108
/*
 * Vblank waitqueue callback: boost the request's frequency if it hasn't
 * started yet, then drop all references and free the one-shot entry.
 * Returns 1 to indicate the wait entry handled the wakeup.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
                        unsigned mode, int sync, void *key)
{
        struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
        struct i915_request *rq = wait->request;

        /*
         * If we missed the vblank, but the request is already running it
         * is reasonable to assume that it will complete before the next
         * vblank without our intervention, so leave RPS alone.
         */
        if (!i915_request_started(rq))
                intel_rps_boost(rq);
        i915_request_put(rq);

        /* Balances the drm_crtc_vblank_get() in add_rps_boost_after_vblank(). */
        drm_crtc_vblank_put(wait->crtc);

        list_del(&wait->wait.entry);
        kfree(wait);
        return 1;
}
15130
15131 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15132                                        struct dma_fence *fence)
15133 {
15134         struct wait_rps_boost *wait;
15135
15136         if (!dma_fence_is_i915(fence))
15137                 return;
15138
15139         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15140                 return;
15141
15142         if (drm_crtc_vblank_get(crtc))
15143                 return;
15144
15145         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15146         if (!wait) {
15147                 drm_crtc_vblank_put(crtc);
15148                 return;
15149         }
15150
15151         wait->request = to_request(dma_fence_get(fence));
15152         wait->crtc = crtc;
15153
15154         wait->wait.func = do_rps_boost;
15155         wait->wait.flags = 0;
15156
15157         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15158 }
15159
15160 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15161 {
15162         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15163         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15164         struct drm_framebuffer *fb = plane_state->hw.fb;
15165         struct i915_vma *vma;
15166
15167         if (plane->id == PLANE_CURSOR &&
15168             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15169                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15170                 const int align = intel_cursor_alignment(dev_priv);
15171                 int err;
15172
15173                 err = i915_gem_object_attach_phys(obj, align);
15174                 if (err)
15175                         return err;
15176         }
15177
15178         vma = intel_pin_and_fence_fb_obj(fb,
15179                                          &plane_state->view,
15180                                          intel_plane_uses_fence(plane_state),
15181                                          &plane_state->flags);
15182         if (IS_ERR(vma))
15183                 return PTR_ERR(vma);
15184
15185         plane_state->vma = vma;
15186
15187         return 0;
15188 }
15189
15190 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15191 {
15192         struct i915_vma *vma;
15193
15194         vma = fetch_and_zero(&old_plane_state->vma);
15195         if (vma)
15196                 intel_unpin_fb_vma(vma, old_plane_state->flags);
15197 }
15198
/*
 * Bump the scheduling priority of all requests still rendering into the
 * framebuffer object to display priority, so the flip isn't delayed
 * behind lower-priority work.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
        struct i915_sched_attr attr = {
                .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
        };

        i915_gem_object_wait_priority(obj, 0, &attr);
}
15207
15208 /**
15209  * intel_prepare_plane_fb - Prepare fb for usage on plane
15210  * @plane: drm plane to prepare for
15211  * @_new_plane_state: the plane state being prepared
15212  *
15213  * Prepares a framebuffer for usage on a display plane.  Generally this
15214  * involves pinning the underlying object and updating the frontbuffer tracking
15215  * bits.  Some older platforms need special physical address handling for
15216  * cursor planes.
15217  *
15218  * Returns 0 on success, negative error code on failure.
15219  */
15220 int
15221 intel_prepare_plane_fb(struct drm_plane *plane,
15222                        struct drm_plane_state *_new_plane_state)
15223 {
15224         struct intel_plane_state *new_plane_state =
15225                 to_intel_plane_state(_new_plane_state);
15226         struct intel_atomic_state *intel_state =
15227                 to_intel_atomic_state(new_plane_state->uapi.state);
15228         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15229         struct drm_framebuffer *fb = new_plane_state->hw.fb;
15230         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15231         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
15232         int ret;
15233
15234         if (old_obj) {
15235                 struct intel_crtc_state *crtc_state =
15236                         intel_atomic_get_new_crtc_state(intel_state,
15237                                                         to_intel_crtc(plane->state->crtc));
15238
15239                 /* Big Hammer, we also need to ensure that any pending
15240                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15241                  * current scanout is retired before unpinning the old
15242                  * framebuffer. Note that we rely on userspace rendering
15243                  * into the buffer attached to the pipe they are waiting
15244                  * on. If not, userspace generates a GPU hang with IPEHR
15245                  * point to the MI_WAIT_FOR_EVENT.
15246                  *
15247                  * This should only fail upon a hung GPU, in which case we
15248                  * can safely continue.
15249                  */
15250                 if (needs_modeset(crtc_state)) {
15251                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15252                                                               old_obj->base.resv, NULL,
15253                                                               false, 0,
15254                                                               GFP_KERNEL);
15255                         if (ret < 0)
15256                                 return ret;
15257                 }
15258         }
15259
15260         if (new_plane_state->uapi.fence) { /* explicit fencing */
15261                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
15262                                                     new_plane_state->uapi.fence,
15263                                                     I915_FENCE_TIMEOUT,
15264                                                     GFP_KERNEL);
15265                 if (ret < 0)
15266                         return ret;
15267         }
15268
15269         if (!obj)
15270                 return 0;
15271
15272         ret = i915_gem_object_pin_pages(obj);
15273         if (ret)
15274                 return ret;
15275
15276         ret = intel_plane_pin_fb(new_plane_state);
15277
15278         i915_gem_object_unpin_pages(obj);
15279         if (ret)
15280                 return ret;
15281
15282         fb_obj_bump_render_priority(obj);
15283         intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
15284
15285         if (!new_plane_state->uapi.fence) { /* implicit fencing */
15286                 struct dma_fence *fence;
15287
15288                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15289                                                       obj->base.resv, NULL,
15290                                                       false, I915_FENCE_TIMEOUT,
15291                                                       GFP_KERNEL);
15292                 if (ret < 0)
15293                         return ret;
15294
15295                 fence = dma_resv_get_excl_rcu(obj->base.resv);
15296                 if (fence) {
15297                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15298                                                    fence);
15299                         dma_fence_put(fence);
15300                 }
15301         } else {
15302                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15303                                            new_plane_state->uapi.fence);
15304         }
15305
15306         /*
15307          * We declare pageflips to be interactive and so merit a small bias
15308          * towards upclocking to deliver the frame on time. By only changing
15309          * the RPS thresholds to sample more regularly and aim for higher
15310          * clocks we can hopefully deliver low power workloads (like kodi)
15311          * that are not quite steady state without resorting to forcing
15312          * maximum clocks following a vblank miss (see do_rps_boost()).
15313          */
15314         if (!intel_state->rps_interactive) {
15315                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15316                 intel_state->rps_interactive = true;
15317         }
15318
15319         return 0;
15320 }
15321
15322 /**
15323  * intel_cleanup_plane_fb - Cleans up an fb after plane use
15324  * @plane: drm plane to clean up for
15325  * @_old_plane_state: the state from the previous modeset
15326  *
15327  * Cleans up a framebuffer that has just been removed from a plane.
15328  */
15329 void
15330 intel_cleanup_plane_fb(struct drm_plane *plane,
15331                        struct drm_plane_state *_old_plane_state)
15332 {
15333         struct intel_plane_state *old_plane_state =
15334                 to_intel_plane_state(_old_plane_state);
15335         struct intel_atomic_state *intel_state =
15336                 to_intel_atomic_state(old_plane_state->uapi.state);
15337         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15338
15339         if (intel_state->rps_interactive) {
15340                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15341                 intel_state->rps_interactive = false;
15342         }
15343
15344         /* Should only be called after a successful intel_prepare_plane_fb()! */
15345         intel_plane_unpin_fb(old_plane_state);
15346 }
15347
15348 /**
15349  * intel_plane_destroy - destroy a plane
15350  * @plane: plane to destroy
15351  *
15352  * Common destruction function for all types of planes (primary, cursor,
15353  * sprite).
15354  */
15355 void intel_plane_destroy(struct drm_plane *plane)
15356 {
15357         drm_plane_cleanup(plane);
15358         kfree(to_intel_plane(plane));
15359 }
15360
15361 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15362                                             u32 format, u64 modifier)
15363 {
15364         switch (modifier) {
15365         case DRM_FORMAT_MOD_LINEAR:
15366         case I915_FORMAT_MOD_X_TILED:
15367                 break;
15368         default:
15369                 return false;
15370         }
15371
15372         switch (format) {
15373         case DRM_FORMAT_C8:
15374         case DRM_FORMAT_RGB565:
15375         case DRM_FORMAT_XRGB1555:
15376         case DRM_FORMAT_XRGB8888:
15377                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15378                         modifier == I915_FORMAT_MOD_X_TILED;
15379         default:
15380                 return false;
15381         }
15382 }
15383
15384 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15385                                             u32 format, u64 modifier)
15386 {
15387         switch (modifier) {
15388         case DRM_FORMAT_MOD_LINEAR:
15389         case I915_FORMAT_MOD_X_TILED:
15390                 break;
15391         default:
15392                 return false;
15393         }
15394
15395         switch (format) {
15396         case DRM_FORMAT_C8:
15397         case DRM_FORMAT_RGB565:
15398         case DRM_FORMAT_XRGB8888:
15399         case DRM_FORMAT_XBGR8888:
15400         case DRM_FORMAT_ARGB8888:
15401         case DRM_FORMAT_ABGR8888:
15402         case DRM_FORMAT_XRGB2101010:
15403         case DRM_FORMAT_XBGR2101010:
15404         case DRM_FORMAT_ARGB2101010:
15405         case DRM_FORMAT_ABGR2101010:
15406         case DRM_FORMAT_XBGR16161616F:
15407                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15408                         modifier == I915_FORMAT_MOD_X_TILED;
15409         default:
15410                 return false;
15411         }
15412 }
15413
15414 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
15415                                               u32 format, u64 modifier)
15416 {
15417         return modifier == DRM_FORMAT_MOD_LINEAR &&
15418                 format == DRM_FORMAT_ARGB8888;
15419 }
15420
/* drm_plane_funcs for gen4+ primary planes (stock atomic helpers). */
static const struct drm_plane_funcs i965_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = i965_plane_format_mod_supported,
};
15429
/* drm_plane_funcs for gen2/3 primary planes (stock atomic helpers). */
static const struct drm_plane_funcs i8xx_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = i8xx_plane_format_mod_supported,
};
15438
/*
 * Legacy cursor ioctl fast path: update the cursor plane without going
 * through a full atomic commit (and its vblank waits) when only the fb
 * or position changes. Falls back to the regular atomic slowpath whenever
 * the crtc is inactive, a modeset or pipe update is pending, a previous
 * commit still owns the plane, or any watermark-affecting parameter
 * (size, crtc, fb presence) changes.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
                           struct drm_crtc *_crtc,
                           struct drm_framebuffer *fb,
                           int crtc_x, int crtc_y,
                           unsigned int crtc_w, unsigned int crtc_h,
                           u32 src_x, u32 src_y,
                           u32 src_w, u32 src_h,
                           struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_plane *plane = to_intel_plane(_plane);
        struct intel_crtc *crtc = to_intel_crtc(_crtc);
        struct intel_plane_state *old_plane_state =
                to_intel_plane_state(plane->base.state);
        struct intel_plane_state *new_plane_state;
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_crtc_state *new_crtc_state;
        int ret;

        /*
         * When crtc is inactive or there is a modeset pending,
         * wait for it to complete in the slowpath
         */
        if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
            crtc_state->update_pipe)
                goto slow;

        /*
         * Don't do an async update if there is an outstanding commit modifying
         * the plane.  This prevents our async update's changes from getting
         * overridden by a previous synchronous update's state.
         */
        if (old_plane_state->uapi.commit &&
            !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
                goto slow;

        /*
         * If any parameters change that may affect watermarks,
         * take the slowpath. Only changing fb or position should be
         * in the fastpath.
         */
        if (old_plane_state->uapi.crtc != &crtc->base ||
            old_plane_state->uapi.src_w != src_w ||
            old_plane_state->uapi.src_h != src_h ||
            old_plane_state->uapi.crtc_w != crtc_w ||
            old_plane_state->uapi.crtc_h != crtc_h ||
            !old_plane_state->uapi.fb != !fb)
                goto slow;

        /* Build throwaway plane/crtc states just for the atomic check. */
        new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
        if (!new_plane_state)
                return -ENOMEM;

        new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
        if (!new_crtc_state) {
                ret = -ENOMEM;
                goto out_free;
        }

        drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

        new_plane_state->uapi.src_x = src_x;
        new_plane_state->uapi.src_y = src_y;
        new_plane_state->uapi.src_w = src_w;
        new_plane_state->uapi.src_h = src_h;
        new_plane_state->uapi.crtc_x = crtc_x;
        new_plane_state->uapi.crtc_y = crtc_y;
        new_plane_state->uapi.crtc_w = crtc_w;
        new_plane_state->uapi.crtc_h = crtc_h;

        ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
                                                  old_plane_state, new_plane_state);
        if (ret)
                goto out_free;

        ret = intel_plane_pin_fb(new_plane_state);
        if (ret)
                goto out_free;

        intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
                                ORIGIN_FLIP);
        intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
                                to_intel_frontbuffer(new_plane_state->hw.fb),
                                plane->frontbuffer_bit);

        /* Swap plane state */
        plane->base.state = &new_plane_state->uapi;

        /*
         * We cannot swap crtc_state as it may be in use by an atomic commit or
         * page flip that's running simultaneously. If we swap crtc_state and
         * destroy the old state, we will cause a use-after-free there.
         *
         * Only update active_planes, which is needed for our internal
         * bookkeeping. Either value will do the right thing when updating
         * planes atomically. If the cursor was part of the atomic update then
         * we would have taken the slowpath.
         */
        crtc_state->active_planes = new_crtc_state->active_planes;

        if (new_plane_state->uapi.visible)
                intel_update_plane(plane, crtc_state, new_plane_state);
        else
                intel_disable_plane(plane, crtc_state);

        intel_plane_unpin_fb(old_plane_state);

out_free:
        /* The temporary crtc state is always discarded ... */
        if (new_crtc_state)
                intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
        /* ... and whichever plane state did not end up in plane->base.state. */
        if (ret)
                intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
        else
                intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
        return ret;

slow:
        return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
                                              crtc_x, crtc_y, crtc_w, crtc_h,
                                              src_x, src_y, src_w, src_h, ctx);
}
15561
/* drm_plane_funcs for cursor planes; update_plane uses the legacy fast path. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .update_plane = intel_legacy_cursor_update,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = intel_cursor_format_mod_supported,
};
15570
15571 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
15572                                enum i9xx_plane_id i9xx_plane)
15573 {
15574         if (!HAS_FBC(dev_priv))
15575                 return false;
15576
15577         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15578                 return i9xx_plane == PLANE_A; /* tied to pipe A */
15579         else if (IS_IVYBRIDGE(dev_priv))
15580                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
15581                         i9xx_plane == PLANE_C;
15582         else if (INTEL_GEN(dev_priv) >= 4)
15583                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
15584         else
15585                 return i9xx_plane == PLANE_A;
15586 }
15587
/*
 * Create and initialize the primary plane for @pipe on pre-gen9 hardware
 * (gen9+ is diverted to the universal plane path). Selects the i9xx plane
 * id, format list, plane funcs, min-cdclk hook and rotation support based
 * on the platform. Returns the plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_plane *plane;
        const struct drm_plane_funcs *plane_funcs;
        unsigned int supported_rotations;
        unsigned int possible_crtcs;
        const u32 *formats;
        int num_formats;
        int ret, zpos;

        if (INTEL_GEN(dev_priv) >= 9)
                return skl_universal_plane_create(dev_priv, pipe,
                                                  PLANE_PRIMARY);

        plane = intel_plane_alloc();
        if (IS_ERR(plane))
                return plane;

        plane->pipe = pipe;
        /*
         * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
         * port is hooked to pipe B. Hence we want plane A feeding pipe B.
         */
        if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
                plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
        else
                plane->i9xx_plane = (enum i9xx_plane_id) pipe;
        plane->id = PLANE_PRIMARY;
        plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

        plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
        if (plane->has_fbc) {
                struct intel_fbc *fbc = &dev_priv->fbc;

                fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
        }

        /* Pick the platform's primary plane pixel format list. */
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                formats = vlv_primary_formats;
                num_formats = ARRAY_SIZE(vlv_primary_formats);
        } else if (INTEL_GEN(dev_priv) >= 4) {
                /*
                 * WaFP16GammaEnabling:ivb
                 * "Workaround : When using the 64-bit format, the plane
                 *  output on each color channel has one quarter amplitude.
                 *  It can be brought up to full amplitude by using pipe
                 *  gamma correction or pipe color space conversion to
                 *  multiply the plane output by four."
                 *
                 * There is no dedicated plane gamma for the primary plane,
                 * and using the pipe gamma/csc could conflict with other
                 * planes, so we choose not to expose fp16 on IVB primary
                 * planes. HSW primary planes no longer have this problem.
                 */
                if (IS_IVYBRIDGE(dev_priv)) {
                        formats = ivb_primary_formats;
                        num_formats = ARRAY_SIZE(ivb_primary_formats);
                } else {
                        formats = i965_primary_formats;
                        num_formats = ARRAY_SIZE(i965_primary_formats);
                }
        } else {
                formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
        }

        if (INTEL_GEN(dev_priv) >= 4)
                plane_funcs = &i965_plane_funcs;
        else
                plane_funcs = &i8xx_plane_funcs;

        /* Per-platform minimum-cdclk calculation for this plane. */
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                plane->min_cdclk = vlv_plane_min_cdclk;
        else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                plane->min_cdclk = hsw_plane_min_cdclk;
        else if (IS_IVYBRIDGE(dev_priv))
                plane->min_cdclk = ivb_plane_min_cdclk;
        else
                plane->min_cdclk = i9xx_plane_min_cdclk;

        plane->max_stride = i9xx_plane_max_stride;
        plane->update_plane = i9xx_update_plane;
        plane->disable_plane = i9xx_disable_plane;
        plane->get_hw_state = i9xx_plane_get_hw_state;
        plane->check_plane = i9xx_plane_check;

        possible_crtcs = BIT(pipe);

        /* Older platforms name planes after the plane id, not the pipe. */
        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
                                               possible_crtcs, plane_funcs,
                                               formats, num_formats,
                                               i9xx_format_modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "primary %c", pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
                                               possible_crtcs, plane_funcs,
                                               formats, num_formats,
                                               i9xx_format_modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane %c",
                                               plane_name(plane->i9xx_plane));
        if (ret)
                goto fail;

        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
                        DRM_MODE_REFLECT_X;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
        } else {
                supported_rotations = DRM_MODE_ROTATE_0;
        }

        if (INTEL_GEN(dev_priv) >= 4)
                drm_plane_create_rotation_property(&plane->base,
                                                   DRM_MODE_ROTATE_0,
                                                   supported_rotations);

        /* Primary plane sits at the bottom of the zpos stack. */
        zpos = 0;
        drm_plane_create_zpos_immutable_property(&plane->base, zpos);

        drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

        return plane;

fail:
        intel_plane_free(plane);

        return ERR_PTR(ret);
}
15723
15724 static struct intel_plane *
15725 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
15726                           enum pipe pipe)
15727 {
15728         unsigned int possible_crtcs;
15729         struct intel_plane *cursor;
15730         int ret, zpos;
15731
15732         cursor = intel_plane_alloc();
15733         if (IS_ERR(cursor))
15734                 return cursor;
15735
15736         cursor->pipe = pipe;
15737         cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
15738         cursor->id = PLANE_CURSOR;
15739         cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
15740
15741         if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15742                 cursor->max_stride = i845_cursor_max_stride;
15743                 cursor->update_plane = i845_update_cursor;
15744                 cursor->disable_plane = i845_disable_cursor;
15745                 cursor->get_hw_state = i845_cursor_get_hw_state;
15746                 cursor->check_plane = i845_check_cursor;
15747         } else {
15748                 cursor->max_stride = i9xx_cursor_max_stride;
15749                 cursor->update_plane = i9xx_update_cursor;
15750                 cursor->disable_plane = i9xx_disable_cursor;
15751                 cursor->get_hw_state = i9xx_cursor_get_hw_state;
15752                 cursor->check_plane = i9xx_check_cursor;
15753         }
15754
15755         cursor->cursor.base = ~0;
15756         cursor->cursor.cntl = ~0;
15757
15758         if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
15759                 cursor->cursor.size = ~0;
15760
15761         possible_crtcs = BIT(pipe);
15762
15763         ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
15764                                        possible_crtcs, &intel_cursor_plane_funcs,
15765                                        intel_cursor_formats,
15766                                        ARRAY_SIZE(intel_cursor_formats),
15767                                        cursor_format_modifiers,
15768                                        DRM_PLANE_TYPE_CURSOR,
15769                                        "cursor %c", pipe_name(pipe));
15770         if (ret)
15771                 goto fail;
15772
15773         if (INTEL_GEN(dev_priv) >= 4)
15774                 drm_plane_create_rotation_property(&cursor->base,
15775                                                    DRM_MODE_ROTATE_0,
15776                                                    DRM_MODE_ROTATE_0 |
15777                                                    DRM_MODE_ROTATE_180);
15778
15779         zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
15780         drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
15781
15782         drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
15783
15784         return cursor;
15785
15786 fail:
15787         intel_plane_free(cursor);
15788
15789         return ERR_PTR(ret);
15790 }
15791
15792 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
15793                                     struct intel_crtc_state *crtc_state)
15794 {
15795         struct intel_crtc_scaler_state *scaler_state =
15796                 &crtc_state->scaler_state;
15797         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15798         int i;
15799
15800         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
15801         if (!crtc->num_scalers)
15802                 return;
15803
15804         for (i = 0; i < crtc->num_scalers; i++) {
15805                 struct intel_scaler *scaler = &scaler_state->scalers[i];
15806
15807                 scaler->in_use = 0;
15808                 scaler->mode = 0;
15809         }
15810
15811         scaler_state->scaler_id = -1;
15812 }
15813
/*
 * CRTC funcs shared by every platform variant below; the per-platform
 * tables only add the vblank counter/enable/disable hooks on top.
 */
#define INTEL_CRTC_FUNCS \
        .gamma_set = drm_atomic_helper_legacy_gamma_set, \
        .set_config = drm_atomic_helper_set_config, \
        .destroy = intel_crtc_destroy, \
        .page_flip = drm_atomic_helper_page_flip, \
        .atomic_duplicate_state = intel_crtc_duplicate_state, \
        .atomic_destroy_state = intel_crtc_destroy_state, \
        .set_crc_source = intel_crtc_set_crc_source, \
        .verify_crc_source = intel_crtc_verify_crc_source, \
        .get_crc_sources = intel_crtc_get_crc_sources
15824
/* BDW+: frame counter in the display engine, vblank via DE pipe IRQs. */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = g4x_get_vblank_counter,
        .enable_vblank = bdw_enable_vblank,
        .disable_vblank = bdw_disable_vblank,
};

/* ILK-HSW: same frame counter as g4x, PCH-era vblank interrupts. */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = g4x_get_vblank_counter,
        .enable_vblank = ilk_enable_vblank,
        .disable_vblank = ilk_disable_vblank,
};

/* G4X/VLV/CHV: hardware frame counter plus i965-style vblank IRQs. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = g4x_get_vblank_counter,
        .enable_vblank = i965_enable_vblank,
        .disable_vblank = i965_disable_vblank,
};

/* Gen4 (i965): older frame counter register layout. */
static const struct drm_crtc_funcs i965_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = i915_get_vblank_counter,
        .enable_vblank = i965_enable_vblank,
        .disable_vblank = i965_disable_vblank,
};

/* i915GM/i945GM: dedicated vblank hooks (vblank work vs. C3 on these). */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = i915_get_vblank_counter,
        .enable_vblank = i915gm_enable_vblank,
        .disable_vblank = i915gm_disable_vblank,
};

/* Gen3: i915-style frame counter, gen2-style vblank interrupts. */
static const struct drm_crtc_funcs i915_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = i915_get_vblank_counter,
        .enable_vblank = i8xx_enable_vblank,
        .disable_vblank = i8xx_disable_vblank,
};

/* Gen2: no hardware frame counter at all. */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        /* no hw vblank counter */
        .enable_vblank = i8xx_enable_vblank,
        .disable_vblank = i8xx_disable_vblank,
};
15880
/*
 * intel_crtc_init - allocate and register the CRTC for one pipe
 * @dev_priv: i915 device
 * @pipe: hardware pipe this CRTC drives
 *
 * Allocates the intel_crtc and its initial atomic state, creates the
 * primary, sprite and cursor planes for the pipe, picks the platform's
 * vblank/crtc funcs table and registers everything with the DRM core.
 *
 * Returns 0 on success or a negative errno. On failure only the
 * crtc/state allocations are freed here; any planes already registered
 * are cleaned up later by drm_mode_config_cleanup() (see the comment at
 * the fail label).
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        const struct drm_crtc_funcs *funcs;
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *crtc_state = NULL;
        struct intel_plane *primary = NULL;
        struct intel_plane *cursor = NULL;
        int sprite, ret;

        intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
        if (!intel_crtc)
                return -ENOMEM;

        crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        if (!crtc_state) {
                ret = -ENOMEM;
                goto fail;
        }
        /* Hook up the initial atomic state before any planes are created. */
        __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->uapi);
        intel_crtc->config = crtc_state;

        primary = intel_primary_plane_create(dev_priv, pipe);
        if (IS_ERR(primary)) {
                ret = PTR_ERR(primary);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(primary->id);

        for_each_sprite(dev_priv, pipe, sprite) {
                struct intel_plane *plane;

                plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                intel_crtc->plane_ids_mask |= BIT(plane->id);
        }

        cursor = intel_cursor_plane_create(dev_priv, pipe);
        if (IS_ERR(cursor)) {
                ret = PTR_ERR(cursor);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(cursor->id);

        /* Pick the vblank hooks matching this platform's hardware. */
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv) ||
                    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
                        funcs = &g4x_crtc_funcs;
                else if (IS_GEN(dev_priv, 4))
                        funcs = &i965_crtc_funcs;
                else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
                        funcs = &i915gm_crtc_funcs;
                else if (IS_GEN(dev_priv, 3))
                        funcs = &i915_crtc_funcs;
                else
                        funcs = &i8xx_crtc_funcs;
        } else {
                if (INTEL_GEN(dev_priv) >= 8)
                        funcs = &bdw_crtc_funcs;
                else
                        funcs = &ilk_crtc_funcs;
        }

        ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
                                        &primary->base, &cursor->base,
                                        funcs, "pipe %c", pipe_name(pipe));
        if (ret)
                goto fail;

        intel_crtc->pipe = pipe;

        /* initialize shared scalers */
        intel_crtc_init_scalers(intel_crtc, crtc_state);

        /* Each pipe maps to exactly one CRTC; catch double registration. */
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
               dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
        dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

        /* Pre-gen9 also tracks the legacy primary-plane -> CRTC mapping. */
        if (INTEL_GEN(dev_priv) < 9) {
                enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

                BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
                       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
                dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
        }

        intel_color_init(intel_crtc);

        /* Code elsewhere assumes drm_crtc_index() == pipe. */
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

        return 0;

fail:
        /*
         * drm_mode_config_cleanup() will free up any
         * crtcs/planes already initialized.
         */
        kfree(crtc_state);
        kfree(intel_crtc);

        return ret;
}
15985
15986 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15987                                       struct drm_file *file)
15988 {
15989         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15990         struct drm_crtc *drmmode_crtc;
15991         struct intel_crtc *crtc;
15992
15993         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15994         if (!drmmode_crtc)
15995                 return -ENOENT;
15996
15997         crtc = to_intel_crtc(drmmode_crtc);
15998         pipe_from_crtc_id->pipe = crtc->pipe;
15999
16000         return 0;
16001 }
16002
16003 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16004 {
16005         struct drm_device *dev = encoder->base.dev;
16006         struct intel_encoder *source_encoder;
16007         u32 possible_clones = 0;
16008
16009         for_each_intel_encoder(dev, source_encoder) {
16010                 if (encoders_cloneable(encoder, source_encoder))
16011                         possible_clones |= drm_encoder_mask(&source_encoder->base);
16012         }
16013
16014         return possible_clones;
16015 }
16016
16017 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16018 {
16019         struct drm_device *dev = encoder->base.dev;
16020         struct intel_crtc *crtc;
16021         u32 possible_crtcs = 0;
16022
16023         for_each_intel_crtc(dev, crtc) {
16024                 if (encoder->pipe_mask & BIT(crtc->pipe))
16025                         possible_crtcs |= drm_crtc_mask(&crtc->base);
16026         }
16027
16028         return possible_crtcs;
16029 }
16030
16031 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16032 {
16033         if (!IS_MOBILE(dev_priv))
16034                 return false;
16035
16036         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
16037                 return false;
16038
16039         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
16040                 return false;
16041
16042         return true;
16043 }
16044
16045 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
16046 {
16047         if (INTEL_GEN(dev_priv) >= 9)
16048                 return false;
16049
16050         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
16051                 return false;
16052
16053         if (HAS_PCH_LPT_H(dev_priv) &&
16054             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
16055                 return false;
16056
16057         /* DDI E can't be used if DDI A requires 4 lanes */
16058         if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
16059                 return false;
16060
16061         if (!dev_priv->vbt.int_crt_support)
16062                 return false;
16063
16064         return true;
16065 }
16066
16067 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16068 {
16069         int pps_num;
16070         int pps_idx;
16071
16072         if (HAS_DDI(dev_priv))
16073                 return;
16074         /*
16075          * This w/a is needed at least on CPT/PPT, but to be sure apply it
16076          * everywhere where registers can be write protected.
16077          */
16078         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16079                 pps_num = 2;
16080         else
16081                 pps_num = 1;
16082
16083         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16084                 u32 val = I915_READ(PP_CONTROL(pps_idx));
16085
16086                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16087                 I915_WRITE(PP_CONTROL(pps_idx), val);
16088         }
16089 }
16090
16091 static void intel_pps_init(struct drm_i915_private *dev_priv)
16092 {
16093         if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
16094                 dev_priv->pps_mmio_base = PCH_PPS_BASE;
16095         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16096                 dev_priv->pps_mmio_base = VLV_PPS_BASE;
16097         else
16098                 dev_priv->pps_mmio_base = PPS_BASE;
16099
16100         intel_pps_unlock_regs_wa(dev_priv);
16101 }
16102
/*
 * intel_setup_outputs - probe and register all display outputs
 * @dev_priv: i915 device
 *
 * Walks the platform-specific detection logic (strap registers, fuse
 * bits and VBT) and registers an encoder/connector for every output
 * found. The per-platform branches are ordered newest-first; the probe
 * order within a branch matters (e.g. LVDS before eDP, SDVO before
 * HDMI/DP on shared pins), so do not reorder the calls.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;

        intel_pps_init(dev_priv);

        if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
                return;

        if (INTEL_GEN(dev_priv) >= 12) {
                /* NOTE: port C is skipped here on gen12 — confirm intended */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
                intel_ddi_init(dev_priv, PORT_G);
                intel_ddi_init(dev_priv, PORT_H);
                intel_ddi_init(dev_priv, PORT_I);
                icl_dsi_init(dev_priv);
        } else if (IS_ELKHARTLAKE(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                icl_dsi_init(dev_priv);
        } else if (IS_GEN(dev_priv, 11)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                /*
                 * On some ICL SKUs port F is not present. No strap bits for
                 * this, so rely on VBT.
                 * Work around broken VBTs on SKUs known to have no port F.
                 */
                if (IS_ICL_WITH_PORT_F(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_F))
                        intel_ddi_init(dev_priv, PORT_F);

                icl_dsi_init(dev_priv);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
                 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
                 * detect the ports.
                 */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);

                vlv_dsi_init(dev_priv);
        } else if (HAS_DDI(dev_priv)) {
                int found;

                if (intel_ddi_crt_present(dev_priv))
                        intel_crt_init(dev_priv);

                /*
                 * Haswell uses DDI functions to detect digital outputs.
                 * On SKL pre-D0 the strap isn't connected, so we assume
                 * it's there.
                 */
                found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
                /* WaIgnoreDDIAStrap: skl */
                if (found || IS_GEN9_BC(dev_priv))
                        intel_ddi_init(dev_priv, PORT_A);

                /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
                 * register */
                found = I915_READ(SFUSE_STRAP);

                if (found & SFUSE_STRAP_DDIB_DETECTED)
                        intel_ddi_init(dev_priv, PORT_B);
                if (found & SFUSE_STRAP_DDIC_DETECTED)
                        intel_ddi_init(dev_priv, PORT_C);
                if (found & SFUSE_STRAP_DDID_DETECTED)
                        intel_ddi_init(dev_priv, PORT_D);
                if (found & SFUSE_STRAP_DDIF_DETECTED)
                        intel_ddi_init(dev_priv, PORT_F);
                /*
                 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
                 */
                if (IS_GEN9_BC(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_E))
                        intel_ddi_init(dev_priv, PORT_E);

        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;

                /*
                 * intel_edp_init_connector() depends on this completing first,
                 * to prevent the registration of both eDP and LVDS and the
                 * incorrect sharing of the PPS.
                 */
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);

                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

                if (ilk_has_edp_a(dev_priv))
                        intel_dp_init(dev_priv, DP_A, PORT_A);

                if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
                        if (!found)
                                intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
                }

                if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

                if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                bool has_edp, has_port;

                if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
                        intel_crt_init(dev_priv);

                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
                 * eDP ports may have been muxed to an alternate function.
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
                 *
                 * Sadly the straps seem to be missing sometimes even for HDMI
                 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
                 * and VBT for the presence of the port. Additionally we can't
                 * trust the port type the VBT declares as we've seen at least
                 * HDMI ports that the VBT claim are DP or eDP.
                 */
                has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
                has_port = intel_bios_is_port_present(dev_priv, PORT_B);
                if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
                if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

                has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
                has_port = intel_bios_is_port_present(dev_priv, PORT_C);
                if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
                if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

                if (IS_CHERRYVIEW(dev_priv)) {
                        /*
                         * eDP not supported on port D,
                         * so no need to worry about it
                         */
                        has_port = intel_bios_is_port_present(dev_priv, PORT_D);
                        if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
                                intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
                        if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
                                intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
                }

                vlv_dsi_init(dev_priv);
        } else if (IS_PINEVIEW(dev_priv)) {
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);
        } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                bool found = false;

                if (IS_MOBILE(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
                        if (!found && IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
                        }

                        if (!found && IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_B, PORT_B);
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
                }

                if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

                        if (IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
                        }
                        if (IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_C, PORT_C);
                }

                if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev_priv, DP_D, PORT_D);

                if (SUPPORTS_TV(dev_priv))
                        intel_tv_init(dev_priv);
        } else if (IS_GEN(dev_priv, 2)) {
                if (IS_I85X(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);
                intel_dvo_init(dev_priv);
        }

        intel_psr_init(dev_priv);

        /*
         * Now that every encoder exists, compute each one's possible
         * CRTC and clone masks for the DRM core.
         */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                encoder->base.possible_crtcs =
                        intel_encoder_possible_crtcs(encoder);
                encoder->base.possible_clones =
                        intel_encoder_possible_clones(encoder);
        }

        intel_init_pch_refclk(dev_priv);

        drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
16341
16342 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16343 {
16344         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16345
16346         drm_framebuffer_cleanup(fb);
16347         intel_frontbuffer_put(intel_fb->frontbuffer);
16348
16349         kfree(intel_fb);
16350 }
16351
16352 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16353                                                 struct drm_file *file,
16354                                                 unsigned int *handle)
16355 {
16356         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16357
16358         if (obj->userptr.mm) {
16359                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
16360                 return -EINVAL;
16361         }
16362
16363         return drm_gem_handle_create(file, &obj->base, handle);
16364 }
16365
16366 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
16367                                         struct drm_file *file,
16368                                         unsigned flags, unsigned color,
16369                                         struct drm_clip_rect *clips,
16370                                         unsigned num_clips)
16371 {
16372         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16373
16374         i915_gem_object_flush_if_display(obj);
16375         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
16376
16377         return 0;
16378 }
16379
/* Ops for framebuffers created by userspace via addfb. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .destroy = intel_user_framebuffer_destroy,
        .create_handle = intel_user_framebuffer_create_handle,
        .dirty = intel_user_framebuffer_dirty,
};
16385
/*
 * intel_framebuffer_init - validate and initialize a framebuffer
 * @intel_fb: framebuffer wrapper to fill in
 * @obj: backing GEM object
 * @mode_cmd: userspace addfb parameters (may be adjusted, e.g. the
 *            modifier is derived from the tiling mode for legacy addfb)
 *
 * Checks that the pixel format, modifier, tiling, stride and offsets
 * are valid for this platform before registering the fb with the DRM
 * core. Returns 0 on success or a negative errno; on error the
 * frontbuffer reference taken here is dropped again.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_framebuffer *fb = &intel_fb->base;
        u32 max_stride;
        unsigned int tiling, stride;
        int ret = -EINVAL;
        int i;

        intel_fb->frontbuffer = intel_frontbuffer_get(obj);
        if (!intel_fb->frontbuffer)
                return -ENOMEM;

        /* Snapshot the object's tiling state under its lock. */
        i915_gem_object_lock(obj);
        tiling = i915_gem_object_get_tiling(obj);
        stride = i915_gem_object_get_stride(obj);
        i915_gem_object_unlock(obj);

        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
                /*
                 * If there's a fence, enforce that
                 * the fb modifier and tiling mode match.
                 */
                if (tiling != I915_TILING_NONE &&
                    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
                        goto err;
                }
        } else {
                /* Legacy addfb: infer the modifier from the tiling mode. */
                if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
                } else if (tiling == I915_TILING_Y) {
                        DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
                        goto err;
                }
        }

        /* Some plane must be able to scan out this format+modifier combo. */
        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                struct drm_format_name_buf format_name;

                DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
                              drm_get_format_name(mode_cmd->pixel_format,
                                                  &format_name),
                              mode_cmd->modifier[0]);
                goto err;
        }

        /*
         * gen2/3 display engine uses the fence if present,
         * so the tiling mode must match the fb modifier exactly.
         */
        if (INTEL_GEN(dev_priv) < 4 &&
            tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
                goto err;
        }

        max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
                                         mode_cmd->modifier[0]);
        if (mode_cmd->pitches[0] > max_stride) {
                DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
                              mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
                              "tiled" : "linear",
                              mode_cmd->pitches[0], max_stride);
                goto err;
        }

        /*
         * If there's a fence, enforce that
         * the fb pitch and fence stride match.
         */
        if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
                DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
                              mode_cmd->pitches[0], stride);
                goto err;
        }

        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0)
                goto err;

        drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

        /* Per-plane validation: all planes must share one object. */
        for (i = 0; i < fb->format->num_planes; i++) {
                u32 stride_alignment;

                if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
                        DRM_DEBUG_KMS("bad plane %d handle\n", i);
                        goto err;
                }

                stride_alignment = intel_fb_stride_alignment(fb, i);

                /*
                 * Display WA #0531: skl,bxt,kbl,glk
                 *
                 * Render decompression and plane width > 3840
                 * combined with horizontal panning requires the
                 * plane stride to be a multiple of 4. We'll just
                 * require the entire fb to accommodate that to avoid
                 * potential runtime errors at plane configuration time.
                 */
                if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
                    is_ccs_modifier(fb->modifier))
                        stride_alignment *= 4;

                if (fb->pitches[i] & (stride_alignment - 1)) {
                        DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
                                      i, fb->pitches[i], stride_alignment);
                        goto err;
                }

                fb->obj[i] = &obj->base;
        }

        ret = intel_fill_fb_info(dev_priv, fb);
        if (ret)
                goto err;

        ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                goto err;
        }

        return 0;

err:
        intel_frontbuffer_put(intel_fb->frontbuffer);
        return ret;
}
16521
16522 static struct drm_framebuffer *
16523 intel_user_framebuffer_create(struct drm_device *dev,
16524                               struct drm_file *filp,
16525                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
16526 {
16527         struct drm_framebuffer *fb;
16528         struct drm_i915_gem_object *obj;
16529         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
16530
16531         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
16532         if (!obj)
16533                 return ERR_PTR(-ENOENT);
16534
16535         fb = intel_framebuffer_create(obj, &mode_cmd);
16536         i915_gem_object_put(obj);
16537
16538         return fb;
16539 }
16540
16541 static void intel_atomic_state_free(struct drm_atomic_state *state)
16542 {
16543         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
16544
16545         drm_atomic_state_default_release(state);
16546
16547         i915_sw_fence_fini(&intel_state->commit_ready);
16548
16549         kfree(state);
16550 }
16551
16552 static enum drm_mode_status
16553 intel_mode_valid(struct drm_device *dev,
16554                  const struct drm_display_mode *mode)
16555 {
16556         struct drm_i915_private *dev_priv = to_i915(dev);
16557         int hdisplay_max, htotal_max;
16558         int vdisplay_max, vtotal_max;
16559
16560         /*
16561          * Can't reject DBLSCAN here because Xorg ddxen can add piles
16562          * of DBLSCAN modes to the output's mode list when they detect
16563          * the scaling mode property on the connector. And they don't
16564          * ask the kernel to validate those modes in any way until
16565          * modeset time at which point the client gets a protocol error.
16566          * So in order to not upset those clients we silently ignore the
16567          * DBLSCAN flag on such connectors. For other connectors we will
16568          * reject modes with the DBLSCAN flag in encoder->compute_config().
16569          * And we always reject DBLSCAN modes in connector->mode_valid()
16570          * as we never want such modes on the connector's mode list.
16571          */
16572
16573         if (mode->vscan > 1)
16574                 return MODE_NO_VSCAN;
16575
16576         if (mode->flags & DRM_MODE_FLAG_HSKEW)
16577                 return MODE_H_ILLEGAL;
16578
16579         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
16580                            DRM_MODE_FLAG_NCSYNC |
16581                            DRM_MODE_FLAG_PCSYNC))
16582                 return MODE_HSYNC;
16583
16584         if (mode->flags & (DRM_MODE_FLAG_BCAST |
16585                            DRM_MODE_FLAG_PIXMUX |
16586                            DRM_MODE_FLAG_CLKDIV2))
16587                 return MODE_BAD;
16588
16589         /* Transcoder timing limits */
16590         if (INTEL_GEN(dev_priv) >= 11) {
16591                 hdisplay_max = 16384;
16592                 vdisplay_max = 8192;
16593                 htotal_max = 16384;
16594                 vtotal_max = 8192;
16595         } else if (INTEL_GEN(dev_priv) >= 9 ||
16596                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
16597                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
16598                 vdisplay_max = 4096;
16599                 htotal_max = 8192;
16600                 vtotal_max = 8192;
16601         } else if (INTEL_GEN(dev_priv) >= 3) {
16602                 hdisplay_max = 4096;
16603                 vdisplay_max = 4096;
16604                 htotal_max = 8192;
16605                 vtotal_max = 8192;
16606         } else {
16607                 hdisplay_max = 2048;
16608                 vdisplay_max = 2048;
16609                 htotal_max = 4096;
16610                 vtotal_max = 4096;
16611         }
16612
16613         if (mode->hdisplay > hdisplay_max ||
16614             mode->hsync_start > htotal_max ||
16615             mode->hsync_end > htotal_max ||
16616             mode->htotal > htotal_max)
16617                 return MODE_H_ILLEGAL;
16618
16619         if (mode->vdisplay > vdisplay_max ||
16620             mode->vsync_start > vtotal_max ||
16621             mode->vsync_end > vtotal_max ||
16622             mode->vtotal > vtotal_max)
16623                 return MODE_V_ILLEGAL;
16624
16625         if (INTEL_GEN(dev_priv) >= 5) {
16626                 if (mode->hdisplay < 64 ||
16627                     mode->htotal - mode->hdisplay < 32)
16628                         return MODE_H_ILLEGAL;
16629
16630                 if (mode->vtotal - mode->vdisplay < 5)
16631                         return MODE_V_ILLEGAL;
16632         } else {
16633                 if (mode->htotal - mode->hdisplay < 32)
16634                         return MODE_H_ILLEGAL;
16635
16636                 if (mode->vtotal - mode->vdisplay < 3)
16637                         return MODE_V_ILLEGAL;
16638         }
16639
16640         return MODE_OK;
16641 }
16642
16643 enum drm_mode_status
16644 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16645                                 const struct drm_display_mode *mode)
16646 {
16647         int plane_width_max, plane_height_max;
16648
16649         /*
16650          * intel_mode_valid() should be
16651          * sufficient on older platforms.
16652          */
16653         if (INTEL_GEN(dev_priv) < 9)
16654                 return MODE_OK;
16655
16656         /*
16657          * Most people will probably want a fullscreen
16658          * plane so let's not advertize modes that are
16659          * too big for that.
16660          */
16661         if (INTEL_GEN(dev_priv) >= 11) {
16662                 plane_width_max = 5120;
16663                 plane_height_max = 4320;
16664         } else {
16665                 plane_width_max = 5120;
16666                 plane_height_max = 4096;
16667         }
16668
16669         if (mode->hdisplay > plane_width_max)
16670                 return MODE_H_ILLEGAL;
16671
16672         if (mode->vdisplay > plane_height_max)
16673                 return MODE_V_ILLEGAL;
16674
16675         return MODE_OK;
16676 }
16677
/*
 * Device-wide mode config hooks. i915 provides its own fb creation and
 * format info lookup, a fully atomic check/commit path, and wraps the
 * atomic state alloc/clear/free so the larger intel_atomic_state
 * container (see intel_atomic_state_free() above) is handled correctly.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
16689
16690 /**
16691  * intel_init_display_hooks - initialize the display modesetting hooks
16692  * @dev_priv: device private
16693  */
16694 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
16695 {
16696         intel_init_cdclk_hooks(dev_priv);
16697
16698         if (INTEL_GEN(dev_priv) >= 9) {
16699                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16700                 dev_priv->display.get_initial_plane_config =
16701                         skylake_get_initial_plane_config;
16702                 dev_priv->display.crtc_compute_clock =
16703                         haswell_crtc_compute_clock;
16704                 dev_priv->display.crtc_enable = haswell_crtc_enable;
16705                 dev_priv->display.crtc_disable = haswell_crtc_disable;
16706         } else if (HAS_DDI(dev_priv)) {
16707                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16708                 dev_priv->display.get_initial_plane_config =
16709                         i9xx_get_initial_plane_config;
16710                 dev_priv->display.crtc_compute_clock =
16711                         haswell_crtc_compute_clock;
16712                 dev_priv->display.crtc_enable = haswell_crtc_enable;
16713                 dev_priv->display.crtc_disable = haswell_crtc_disable;
16714         } else if (HAS_PCH_SPLIT(dev_priv)) {
16715                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
16716                 dev_priv->display.get_initial_plane_config =
16717                         i9xx_get_initial_plane_config;
16718                 dev_priv->display.crtc_compute_clock =
16719                         ironlake_crtc_compute_clock;
16720                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
16721                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
16722         } else if (IS_CHERRYVIEW(dev_priv)) {
16723                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16724                 dev_priv->display.get_initial_plane_config =
16725                         i9xx_get_initial_plane_config;
16726                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
16727                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16728                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16729         } else if (IS_VALLEYVIEW(dev_priv)) {
16730                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16731                 dev_priv->display.get_initial_plane_config =
16732                         i9xx_get_initial_plane_config;
16733                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
16734                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
16735                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16736         } else if (IS_G4X(dev_priv)) {
16737                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16738                 dev_priv->display.get_initial_plane_config =
16739                         i9xx_get_initial_plane_config;
16740                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
16741                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16742                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16743         } else if (IS_PINEVIEW(dev_priv)) {
16744                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16745                 dev_priv->display.get_initial_plane_config =
16746                         i9xx_get_initial_plane_config;
16747                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
16748                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16749                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16750         } else if (!IS_GEN(dev_priv, 2)) {
16751                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16752                 dev_priv->display.get_initial_plane_config =
16753                         i9xx_get_initial_plane_config;
16754                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
16755                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16756                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16757         } else {
16758                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16759                 dev_priv->display.get_initial_plane_config =
16760                         i9xx_get_initial_plane_config;
16761                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
16762                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
16763                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
16764         }
16765
16766         if (IS_GEN(dev_priv, 5)) {
16767                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
16768         } else if (IS_GEN(dev_priv, 6)) {
16769                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
16770         } else if (IS_IVYBRIDGE(dev_priv)) {
16771                 /* FIXME: detect B0+ stepping and use auto training */
16772                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
16773         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
16774                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
16775         }
16776
16777         if (INTEL_GEN(dev_priv) >= 9)
16778                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
16779         else
16780                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
16781
16782 }
16783
/* Read out the current cdclk state from hardware and seed the
 * logical/actual software state from it. */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	intel_update_cdclk(i915);
	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
	/* Start from the assumption that SW state matches HW state. */
	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
16790
16791 /*
16792  * Calculate what we think the watermarks should be for the state we've read
16793  * out of the hardware and then immediately program those watermarks so that
16794  * we ensure the hardware settings match our internal state.
16795  *
16796  * We can calculate what we think WM's should be by creating a duplicate of the
16797  * current state (which was constructed during hardware readout) and running it
16798  * through the atomic check code to calculate new watermark values in the
16799  * state object.
16800  */
16801 static void sanitize_watermarks(struct drm_device *dev)
16802 {
16803         struct drm_i915_private *dev_priv = to_i915(dev);
16804         struct drm_atomic_state *state;
16805         struct intel_atomic_state *intel_state;
16806         struct intel_crtc *crtc;
16807         struct intel_crtc_state *crtc_state;
16808         struct drm_modeset_acquire_ctx ctx;
16809         int ret;
16810         int i;
16811
16812         /* Only supported on platforms that use atomic watermark design */
16813         if (!dev_priv->display.optimize_watermarks)
16814                 return;
16815
16816         /*
16817          * We need to hold connection_mutex before calling duplicate_state so
16818          * that the connector loop is protected.
16819          */
16820         drm_modeset_acquire_init(&ctx, 0);
16821 retry:
16822         ret = drm_modeset_lock_all_ctx(dev, &ctx);
16823         if (ret == -EDEADLK) {
16824                 drm_modeset_backoff(&ctx);
16825                 goto retry;
16826         } else if (WARN_ON(ret)) {
16827                 goto fail;
16828         }
16829
16830         state = drm_atomic_helper_duplicate_state(dev, &ctx);
16831         if (WARN_ON(IS_ERR(state)))
16832                 goto fail;
16833
16834         intel_state = to_intel_atomic_state(state);
16835
16836         /*
16837          * Hardware readout is the only time we don't want to calculate
16838          * intermediate watermarks (since we don't trust the current
16839          * watermarks).
16840          */
16841         if (!HAS_GMCH(dev_priv))
16842                 intel_state->skip_intermediate_wm = true;
16843
16844         ret = intel_atomic_check(dev, state);
16845         if (ret) {
16846                 /*
16847                  * If we fail here, it means that the hardware appears to be
16848                  * programmed in a way that shouldn't be possible, given our
16849                  * understanding of watermark requirements.  This might mean a
16850                  * mistake in the hardware readout code or a mistake in the
16851                  * watermark calculations for a given platform.  Raise a WARN
16852                  * so that this is noticeable.
16853                  *
16854                  * If this actually happens, we'll have to just leave the
16855                  * BIOS-programmed watermarks untouched and hope for the best.
16856                  */
16857                 WARN(true, "Could not determine valid watermarks for inherited state\n");
16858                 goto put_state;
16859         }
16860
16861         /* Write calculated watermark values back */
16862         for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
16863                 crtc_state->wm.need_postvbl_update = true;
16864                 dev_priv->display.optimize_watermarks(intel_state, crtc_state);
16865
16866                 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
16867         }
16868
16869 put_state:
16870         drm_atomic_state_put(state);
16871 fail:
16872         drm_modeset_drop_locks(&ctx);
16873         drm_modeset_acquire_fini(&ctx);
16874 }
16875
16876 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16877 {
16878         if (IS_GEN(dev_priv, 5)) {
16879                 u32 fdi_pll_clk =
16880                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16881
16882                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16883         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16884                 dev_priv->fdi_pll_freq = 270000;
16885         } else {
16886                 return;
16887         }
16888
16889         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16890 }
16891
/*
 * Commit the state we read out from the hardware back through the
 * atomic machinery, so all derived plane/crtc state is computed
 * before the first modeset request from userspace arrives.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		/* Only active pipes need their planes recomputed. */
		if (crtc_state->hw.active) {
			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On lock contention, clear the state and retry from scratch. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
16948
16949 static void intel_mode_config_init(struct drm_i915_private *i915)
16950 {
16951         struct drm_mode_config *mode_config = &i915->drm.mode_config;
16952
16953         drm_mode_config_init(&i915->drm);
16954
16955         mode_config->min_width = 0;
16956         mode_config->min_height = 0;
16957
16958         mode_config->preferred_depth = 24;
16959         mode_config->prefer_shadow = 1;
16960
16961         mode_config->allow_fb_modifiers = true;
16962
16963         mode_config->funcs = &intel_mode_funcs;
16964
16965         /*
16966          * Maximum framebuffer dimensions, chosen to match
16967          * the maximum render engine surface size on gen4+.
16968          */
16969         if (INTEL_GEN(i915) >= 7) {
16970                 mode_config->max_width = 16384;
16971                 mode_config->max_height = 16384;
16972         } else if (INTEL_GEN(i915) >= 4) {
16973                 mode_config->max_width = 8192;
16974                 mode_config->max_height = 8192;
16975         } else if (IS_GEN(i915, 3)) {
16976                 mode_config->max_width = 4096;
16977                 mode_config->max_height = 4096;
16978         } else {
16979                 mode_config->max_width = 2048;
16980                 mode_config->max_height = 2048;
16981         }
16982
16983         if (IS_I845G(i915) || IS_I865G(i915)) {
16984                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
16985                 mode_config->cursor_height = 1023;
16986         } else if (IS_GEN(i915, 2)) {
16987                 mode_config->cursor_width = 64;
16988                 mode_config->cursor_height = 64;
16989         } else {
16990                 mode_config->cursor_width = 256;
16991                 mode_config->cursor_height = 256;
16992         }
16993 }
16994
/*
 * One-time display initialization during driver probe: sets up the
 * mode config, crtcs, plls, outputs, reads out the BIOS-programmed
 * hardware state and commits it back through the atomic path.
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	/*
	 * NOTE(review): the workqueue allocations are not checked for
	 * NULL here — confirm later users tolerate that or add checks.
	 */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_bw_init(i915);
	if (ret)
		return ret;

	/* Deferred freeing of atomic states, see intel_atomic_helper_free_state_worker. */
	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_NUM_PIPES(i915),
		      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create a crtc for each pipe, if the display is usable at all. */
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				drm_mode_config_cleanup(dev);
				return ret;
			}
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out the hardware state left behind by the BIOS. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	return 0;
}
17101
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 mode (the "force
 * pipe quirk"). The register write sequence below — including the
 * repeated DPLL writes — is deliberate; do not reorder.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: these dividers should yield ~25.154 MHz dot clock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Program the fixed 640x480 timings (low 16 bits active, high 16 total). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	/* Finally turn the pipe on and confirm the scanline is advancing. */
	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
17171
/*
 * Counterpart of i830_enable_pipe(): turn the force-quirk pipe back
 * off. Planes and cursors must already be disabled (WARNed below),
 * and the DPLL is only shut down after the pipe has stopped scanning.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* All planes/cursors should be off before the pipe goes down. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	/* Only now is it safe to disable the DPLL. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
17193
17194 static void
17195 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17196 {
17197         struct intel_crtc *crtc;
17198
17199         if (INTEL_GEN(dev_priv) >= 4)
17200                 return;
17201
17202         for_each_intel_crtc(&dev_priv->drm, crtc) {
17203                 struct intel_plane *plane =
17204                         to_intel_plane(crtc->base.primary);
17205                 struct intel_crtc *plane_crtc;
17206                 enum pipe pipe;
17207
17208                 if (!plane->get_hw_state(plane, &pipe))
17209                         continue;
17210
17211                 if (pipe == crtc->pipe)
17212                         continue;
17213
17214                 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17215                               plane->base.base.id, plane->base.name);
17216
17217                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17218                 intel_plane_disable_noatomic(plane_crtc, plane);
17219         }
17220 }
17221
17222 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17223 {
17224         struct drm_device *dev = crtc->base.dev;
17225         struct intel_encoder *encoder;
17226
17227         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
17228                 return true;
17229
17230         return false;
17231 }
17232
17233 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17234 {
17235         struct drm_device *dev = encoder->base.dev;
17236         struct intel_connector *connector;
17237
17238         for_each_connector_on_encoder(dev, &encoder->base, connector)
17239                 return connector;
17240
17241         return NULL;
17242 }
17243
17244 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17245                               enum pipe pch_transcoder)
17246 {
17247         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17248                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17249 }
17250
17251 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
17252 {
17253         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
17254         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
17255         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
17256
17257         if (INTEL_GEN(dev_priv) >= 9 ||
17258             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
17259                 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
17260                 u32 val;
17261
17262                 if (transcoder_is_dsi(cpu_transcoder))
17263                         return;
17264
17265                 val = I915_READ(reg);
17266                 val &= ~HSW_FRAME_START_DELAY_MASK;
17267                 val |= HSW_FRAME_START_DELAY(0);
17268                 I915_WRITE(reg, val);
17269         } else {
17270                 i915_reg_t reg = PIPECONF(cpu_transcoder);
17271                 u32 val;
17272
17273                 val = I915_READ(reg);
17274                 val &= ~PIPECONF_FRAME_START_DELAY_MASK;
17275                 val |= PIPECONF_FRAME_START_DELAY(0);
17276                 I915_WRITE(reg, val);
17277         }
17278
17279         if (!crtc_state->has_pch_encoder)
17280                 return;
17281
17282         if (HAS_PCH_IBX(dev_priv)) {
17283                 i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
17284                 u32 val;
17285
17286                 val = I915_READ(reg);
17287                 val &= ~TRANS_FRAME_START_DELAY_MASK;
17288                 val |= TRANS_FRAME_START_DELAY(0);
17289                 I915_WRITE(reg, val);
17290         } else {
17291                 i915_reg_t reg = TRANS_CHICKEN2(crtc->pipe);
17292                 u32 val;
17293
17294                 val = I915_READ(reg);
17295                 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
17296                 val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
17297                 I915_WRITE(reg, val);
17298         }
17299 }
17300
/*
 * Bring one crtc into a consistent state after hw readout: clear BIOS
 * leftovers, disable planes/pipes that should not be on, and set up
 * the fifo underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
17367
17368 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17369 {
17370         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17371
17372         /*
17373          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17374          * the hardware when a high res displays plugged in. DPLL P
17375          * divider is zero, and the pipe timings are bonkers. We'll
17376          * try to disable everything in that case.
17377          *
17378          * FIXME would be nice to be able to sanitize this state
17379          * without several WARNs, but for now let's take the easy
17380          * road.
17381          */
17382         return IS_GEN(dev_priv, 6) &&
17383                 crtc_state->hw.active &&
17384                 crtc_state->shared_dpll &&
17385                 crtc_state->port_clock == 0;
17386 }
17387
/*
 * Fix up an encoder whose connector/crtc links are inconsistent with
 * the hardware state (e.g. fallout from resume register restore) by
 * manually running its disable hooks and clamping the links to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* A bogus SNB BIOS DPLL config is treated as an inactive pipe. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* restore the saved best_encoder */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
17454
17455 /* FIXME read out full plane state for all planes */
17456 static void readout_plane_state(struct drm_i915_private *dev_priv)
17457 {
17458         struct intel_plane *plane;
17459         struct intel_crtc *crtc;
17460
17461         for_each_intel_plane(&dev_priv->drm, plane) {
17462                 struct intel_plane_state *plane_state =
17463                         to_intel_plane_state(plane->base.state);
17464                 struct intel_crtc_state *crtc_state;
17465                 enum pipe pipe = PIPE_A;
17466                 bool visible;
17467
17468                 visible = plane->get_hw_state(plane, &pipe);
17469
17470                 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17471                 crtc_state = to_intel_crtc_state(crtc->base.state);
17472
17473                 intel_set_plane_visible(crtc_state, plane_state, visible);
17474
17475                 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
17476                               plane->base.base.id, plane->base.name,
17477                               enableddisabled(visible), pipe_name(pipe));
17478         }
17479
17480         for_each_intel_crtc(&dev_priv->drm, crtc) {
17481                 struct intel_crtc_state *crtc_state =
17482                         to_intel_crtc_state(crtc->base.state);
17483
17484                 fixup_active_planes(crtc_state);
17485         }
17486 }
17487
/*
 * Read the current hardware modeset state back into the crtc, plane,
 * dpll, encoder and connector software state. Purely a readout; any
 * fixing up of inconsistent state is done later by the sanitize code.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_pipes = 0;

	/* Reset each crtc's state and read back whether the pipe is active. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		memset(crtc_state, 0, sizeof(*crtc_state));
		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->uapi);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			dev_priv->active_pipes |= BIT(crtc->pipe);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->hw.active));
	}

	readout_plane_state(dev_priv);

	/* Read back shared DPLL state and rebuild the crtc<->pll mapping. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);

		/*
		 * An enabled DPLL4 on EHL needs a DPLL_DC_OFF power domain
		 * reference held while it stays enabled.
		 */
		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->hw.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Read back encoder state and link active encoders to their crtc. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Read back connector state; fill crtc connector/encoder masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive the remaining per-crtc software state from the readout. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		/* Estimate per-plane data rate and min cdclk requirements. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
				      plane->base.base.id, plane->base.name,
				      crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (WARN_ON(min_cdclk < 0))
				min_cdclk = 0;
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
17688
17689 static void
17690 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17691 {
17692         struct intel_encoder *encoder;
17693
17694         for_each_intel_encoder(&dev_priv->drm, encoder) {
17695                 struct intel_crtc_state *crtc_state;
17696
17697                 if (!encoder->get_power_domains)
17698                         continue;
17699
17700                 /*
17701                  * MST-primary and inactive encoders don't have a crtc state
17702                  * and neither of these require any power domain references.
17703                  */
17704                 if (!encoder->base.crtc)
17705                         continue;
17706
17707                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17708                 encoder->get_power_domains(encoder, crtc_state);
17709         }
17710 }
17711
17712 static void intel_early_display_was(struct drm_i915_private *dev_priv)
17713 {
17714         /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
17715         if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
17716                 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
17717                            DARBF_GATING_DIS);
17718
17719         if (IS_HASWELL(dev_priv)) {
17720                 /*
17721                  * WaRsPkgCStateDisplayPMReq:hsw
17722                  * System hang if this isn't done before disabling all planes!
17723                  */
17724                 I915_WRITE(CHICKEN_PAR1_1,
17725                            I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
17726         }
17727 }
17728
17729 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17730                                        enum port port, i915_reg_t hdmi_reg)
17731 {
17732         u32 val = I915_READ(hdmi_reg);
17733
17734         if (val & SDVO_ENABLE ||
17735             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17736                 return;
17737
17738         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
17739                       port_name(port));
17740
17741         val &= ~SDVO_PIPE_SEL_MASK;
17742         val |= SDVO_PIPE_SEL(PIPE_A);
17743
17744         I915_WRITE(hdmi_reg, val);
17745 }
17746
17747 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17748                                      enum port port, i915_reg_t dp_reg)
17749 {
17750         u32 val = I915_READ(dp_reg);
17751
17752         if (val & DP_PORT_EN ||
17753             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17754                 return;
17755
17756         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
17757                       port_name(port));
17758
17759         val &= ~DP_PIPE_SEL_MASK;
17760         val |= DP_PIPE_SEL(PIPE_A);
17761
17762         I915_WRITE(dp_reg, val);
17763 }
17764
17765 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
17766 {
17767         /*
17768          * The BIOS may select transcoder B on some of the PCH
17769          * ports even it doesn't enable the port. This would trip
17770          * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
17771          * Sanitize the transcoder select bits to prevent that. We
17772          * assume that the BIOS never actually enabled the port,
17773          * because if it did we'd actually have to toggle the port
17774          * on and back off to make the transcoder A select stick
17775          * (see. intel_dp_link_down(), intel_disable_hdmi(),
17776          * intel_disable_sdvo()).
17777          */
17778         ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
17779         ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
17780         ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
17781
17782         /* PCH SDVOB multiplex with HDMIB */
17783         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
17784         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
17785         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
17786 }
17787
/*
 * Scan out the current hw modeset state, and sanitize it to the
 * current state: read everything back, then fix up anything the BIOS
 * or a resume left in an inconsistent shape. The ordering of the
 * sanitize steps below matters; see the inline comments.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	/* Hold display power for the duration of the readout/sanitize. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any PLL the readout found enabled but unused. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Watermark hw state readout (with sanitation on g4x/vlv/chv). */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * After sanitizing no crtc should still require extra power
	 * domains; warn about and drop any that do.
	 */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}
17888
17889 void intel_display_resume(struct drm_device *dev)
17890 {
17891         struct drm_i915_private *dev_priv = to_i915(dev);
17892         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
17893         struct drm_modeset_acquire_ctx ctx;
17894         int ret;
17895
17896         dev_priv->modeset_restore_state = NULL;
17897         if (state)
17898                 state->acquire_ctx = &ctx;
17899
17900         drm_modeset_acquire_init(&ctx, 0);
17901
17902         while (1) {
17903                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
17904                 if (ret != -EDEADLK)
17905                         break;
17906
17907                 drm_modeset_backoff(&ctx);
17908         }
17909
17910         if (!ret)
17911                 ret = __intel_display_resume(dev, state, &ctx);
17912
17913         intel_enable_ipc(dev_priv);
17914         drm_modeset_drop_locks(&ctx);
17915         drm_modeset_acquire_fini(&ctx);
17916
17917         if (ret)
17918                 DRM_ERROR("Restoring old state failed with %i\n", ret);
17919         if (state)
17920                 drm_atomic_state_put(state);
17921 }
17922
17923 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
17924 {
17925         struct intel_connector *connector;
17926         struct drm_connector_list_iter conn_iter;
17927
17928         /* Kill all the work that may have been queued by hpd. */
17929         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
17930         for_each_intel_connector_iter(connector, &conn_iter) {
17931                 if (connector->modeset_retry_work.func)
17932                         cancel_work_sync(&connector->modeset_retry_work);
17933                 if (connector->hdcp.shim) {
17934                         cancel_delayed_work_sync(&connector->hdcp.check_work);
17935                         cancel_work_sync(&connector->hdcp.prop_work);
17936                 }
17937         }
17938         drm_connector_list_iter_end(&conn_iter);
17939 }
17940
/*
 * Tear down the modeset/display side of the driver.
 *
 * NOTE: the teardown order below is load-bearing -- each step quiesces a
 * source of asynchronous work that could otherwise race with the steps
 * that follow it. Do not reorder without understanding the dependencies
 * spelled out in the comments.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain in-flight flips and modesets before dismantling anything. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* The workqueues were flushed above; now it is safe to destroy them. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
17985
17986 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
17987
/*
 * Snapshot of display registers captured at error time by
 * intel_display_capture_error_state() and dumped later by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_CTL2; HSW/BDW only */

	/* Per-pipe cursor plane registers. */
	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		u32 size;	/* NOTE(review): never captured or printed here */
	} cursor[I915_MAX_PIPES];

	/* Per-pipe state; registers only read when the power domain is on. */
	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;	/* PIPESRC */
		u32 stat;	/* PIPESTAT; only captured on GMCH platforms */
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers (subset depends on GEN). */
	struct intel_plane_error_state {
		u32 control;	/* DSPCNTR */
		u32 stride;	/* DSPSTRIDE */
		u32 size;	/* DSPSIZE; gen <= 3 */
		u32 pos;	/* DSPPOS; gen <= 3 */
		u32 addr;	/* DSPADDR; gen <= 7, not HSW */
		u32 surface;	/* DSPSURF; gen >= 4 */
		u32 tile_offset;	/* DSPTILEOFF; gen >= 4 */
	} plane[I915_MAX_PIPES];

	/* One slot per entry in the capture function's transcoders[] table. */
	struct intel_transcoder_error_state {
		bool available;		/* transcoder exists on this platform */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;	/* PIPECONF */

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
18030
18031 struct intel_display_error_state *
18032 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
18033 {
18034         struct intel_display_error_state *error;
18035         int transcoders[] = {
18036                 TRANSCODER_A,
18037                 TRANSCODER_B,
18038                 TRANSCODER_C,
18039                 TRANSCODER_D,
18040                 TRANSCODER_EDP,
18041         };
18042         int i;
18043
18044         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
18045
18046         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
18047                 return NULL;
18048
18049         error = kzalloc(sizeof(*error), GFP_ATOMIC);
18050         if (error == NULL)
18051                 return NULL;
18052
18053         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18054                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
18055
18056         for_each_pipe(dev_priv, i) {
18057                 error->pipe[i].power_domain_on =
18058                         __intel_display_power_is_enabled(dev_priv,
18059                                                          POWER_DOMAIN_PIPE(i));
18060                 if (!error->pipe[i].power_domain_on)
18061                         continue;
18062
18063                 error->cursor[i].control = I915_READ(CURCNTR(i));
18064                 error->cursor[i].position = I915_READ(CURPOS(i));
18065                 error->cursor[i].base = I915_READ(CURBASE(i));
18066
18067                 error->plane[i].control = I915_READ(DSPCNTR(i));
18068                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
18069                 if (INTEL_GEN(dev_priv) <= 3) {
18070                         error->plane[i].size = I915_READ(DSPSIZE(i));
18071                         error->plane[i].pos = I915_READ(DSPPOS(i));
18072                 }
18073                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18074                         error->plane[i].addr = I915_READ(DSPADDR(i));
18075                 if (INTEL_GEN(dev_priv) >= 4) {
18076                         error->plane[i].surface = I915_READ(DSPSURF(i));
18077                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
18078                 }
18079
18080                 error->pipe[i].source = I915_READ(PIPESRC(i));
18081
18082                 if (HAS_GMCH(dev_priv))
18083                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
18084         }
18085
18086         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18087                 enum transcoder cpu_transcoder = transcoders[i];
18088
18089                 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
18090                         continue;
18091
18092                 error->transcoder[i].available = true;
18093                 error->transcoder[i].power_domain_on =
18094                         __intel_display_power_is_enabled(dev_priv,
18095                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
18096                 if (!error->transcoder[i].power_domain_on)
18097                         continue;
18098
18099                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
18100
18101                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
18102                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
18103                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
18104                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
18105                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
18106                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
18107                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
18108         }
18109
18110         return error;
18111 }
18112
/* Shorthand for printing into the error state buffer 'e'. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18114
/*
 * Dump a previously captured display error state into the error state
 * buffer. The output text forms part of the error report, so the format
 * strings below should be treated as a stable interface.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	/* Nothing was captured (no display, or allocation failed). */
	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* GEN gates mirror the ones used at capture time. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		/* Skip transcoders that don't exist on this platform. */
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
18173
18174 #endif