]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/i915/display/intel_display.c
Merge drm/drm-next into drm-intel-next-queued
[linux.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "gt/intel_rps.h"
59
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
78 #include "intel_pm.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
83 #include "intel_tc.h"
84 #include "intel_vga.h"
85
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Framebuffer tiling modifiers accepted on i9xx-style primary planes */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only take linear buffers */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
145
146 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
147                                 struct intel_crtc_state *pipe_config);
148 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
149                                    struct intel_crtc_state *pipe_config);
150
151 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
152                                   struct drm_i915_gem_object *obj,
153                                   struct drm_mode_fb_cmd2 *mode_cmd);
154 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
155 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
156 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
157                                          const struct intel_link_m_n *m_n,
158                                          const struct intel_link_m_n *m2_n2);
159 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
160 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
161 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
162 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
163 static void vlv_prepare_pll(struct intel_crtc *crtc,
164                             const struct intel_crtc_state *pipe_config);
165 static void chv_prepare_pll(struct intel_crtc *crtc,
166                             const struct intel_crtc_state *pipe_config);
167 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
168                                     struct intel_crtc_state *crtc_state);
169 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
170 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
171 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
172 static void intel_modeset_setup_hw_state(struct drm_device *dev,
173                                          struct drm_modeset_acquire_ctx *ctx);
174 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
175
/*
 * Valid divisor/clock ranges used when searching for DPLL settings
 * (see the *_find_best_dpll() helpers below).
 */
struct intel_limit {
	/* inclusive [min, max] range for each divisor / derived clock */
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/*
	 * p2 selection: below dot_limit use p2_slow, at/above it use
	 * p2_fast; for LVDS the choice instead follows single vs dual
	 * channel (see i9xx_select_p2_div()).
	 */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
186
187 /* returns HPLL frequency in kHz */
188 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
189 {
190         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
191
192         /* Obtain SKU information */
193         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
194                 CCK_FUSE_HPLL_FREQ_MASK;
195
196         return vco_freq[hpll_freq] * 1000;
197 }
198
199 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
200                       const char *name, u32 reg, int ref_freq)
201 {
202         u32 val;
203         int divider;
204
205         val = vlv_cck_read(dev_priv, reg);
206         divider = val & CCK_FREQUENCY_VALUES;
207
208         WARN((val & CCK_FREQUENCY_STATUS) !=
209              (divider << CCK_FREQUENCY_STATUS_SHIFT),
210              "%s change in progress\n", name);
211
212         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
213 }
214
215 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
216                            const char *name, u32 reg)
217 {
218         int hpll;
219
220         vlv_cck_get(dev_priv);
221
222         if (dev_priv->hpll_freq == 0)
223                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
224
225         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
226
227         vlv_cck_put(dev_priv);
228
229         return hpll;
230 }
231
232 static void intel_update_czclk(struct drm_i915_private *dev_priv)
233 {
234         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
235                 return;
236
237         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
238                                                       CCK_CZ_CLOCK_CONTROL);
239
240         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
241 }
242
243 static inline u32 /* units of 100MHz */
244 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
245                     const struct intel_crtc_state *pipe_config)
246 {
247         if (HAS_DDI(dev_priv))
248                 return pipe_config->port_clock; /* SPLL */
249         else
250                 return dev_priv->fdi_pll_freq;
251 }
252
/* i8xx DPLL divisor limits, DAC (CRT) output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* i8xx DPLL divisor limits, DVO output */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* i8xx DPLL divisor limits, LVDS output */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* i9xx DPLL divisor limits, SDVO output */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx DPLL divisor limits, LVDS output */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
317
318
/* g4x DPLL divisor limits, SDVO output */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* g4x DPLL divisor limits, HDMI output */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* g4x DPLL divisor limits, single-channel LVDS */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* g4x DPLL divisor limits, dual-channel LVDS */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview DPLL divisor limits, SDVO output */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL divisor limits, LVDS output */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
402
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Ironlake DPLL divisor limits, single-channel LVDS */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake DPLL divisor limits, dual-channel LVDS */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel LVDS with a 100MHz refclk. */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
473
static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries a 2^22 fixed-point scale (see chv_calc_dpll_params) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
517
518 /* WA Display #0827: Gen9:all */
519 static void
520 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
521 {
522         if (enable)
523                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
524                            I915_READ(CLKGATE_DIS_PSL(pipe)) |
525                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
526         else
527                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
528                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
529                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
530 }
531
532 /* Wa_2006604312:icl */
533 static void
534 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
535                        bool enable)
536 {
537         if (enable)
538                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
539                            I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
540         else
541                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
542                            I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
543 }
544
/* Whether the atomic crtc state requires a full modeset. */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}
550
551 bool
552 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
553 {
554         return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
555                 crtc_state->sync_mode_slaves_mask);
556 }
557
558 static bool
559 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
560 {
561         return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
562                 crtc_state->sync_mode_slaves_mask);
563 }
564
565 /*
566  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
567  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
568  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
569  * The helpers' return value is the rate of the clock that is fed to the
570  * display engine's pipe which can be the above fast dot clock rate or a
571  * divided-down version of it.
572  */
573 /* m1 is reserved as 0 in Pineview, n is a ring counter */
574 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
575 {
576         clock->m = clock->m2 + 2;
577         clock->p = clock->p1 * clock->p2;
578         if (WARN_ON(clock->n == 0 || clock->p == 0))
579                 return 0;
580         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
581         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
582
583         return clock->dot;
584 }
585
586 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
587 {
588         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
589 }
590
591 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
592 {
593         clock->m = i9xx_dpll_compute_m(clock);
594         clock->p = clock->p1 * clock->p2;
595         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
596                 return 0;
597         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
598         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
599
600         return clock->dot;
601 }
602
603 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
604 {
605         clock->m = clock->m1 * clock->m2;
606         clock->p = clock->p1 * clock->p2;
607         if (WARN_ON(clock->n == 0 || clock->p == 0))
608                 return 0;
609         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
610         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
611
612         return clock->dot / 5;
613 }
614
615 int chv_calc_dpll_params(int refclk, struct dpll *clock)
616 {
617         clock->m = clock->m1 * clock->m2;
618         clock->p = clock->p1 * clock->p2;
619         if (WARN_ON(clock->n == 0 || clock->p == 0))
620                 return 0;
621         clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
622                                            clock->n << 22);
623         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
624
625         return clock->dot / 5;
626 }
627
/*
 * Reject the current divisor set by returning false from the enclosing
 * function; the debug message is intentionally compiled out.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
629
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	/* Note: each INTELPllInvalid() below returns false from here. */
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is only required on platforms with separate m1/m2 dividers */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* combined m/p limits are not used on vlv/chv/bxt (see their tables) */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
670
671 static int
672 i9xx_select_p2_div(const struct intel_limit *limit,
673                    const struct intel_crtc_state *crtc_state,
674                    int target)
675 {
676         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
677
678         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
679                 /*
680                  * For LVDS just rely on its current settings for dual-channel.
681                  * We haven't figured out how to reliably set up different
682                  * single/dual channel state, if we even can.
683                  */
684                 if (intel_is_dual_link_lvds(dev_priv))
685                         return limit->p2.p2_fast;
686                 else
687                         return limit->p2.p2_slow;
688         } else {
689                 if (target < limit->p2.dot_limit)
690                         return limit->p2.p2_slow;
691                 else
692                         return limit->p2.p2_fast;
693         }
694 }
695
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustively walk the m1/m2/n/p1 space, keeping the valid
	 * candidate whose dot clock is closest to the target. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 > m2 is required (see intel_PLL_is_valid) and
			 * m2 only grows, so cut this inner walk short. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one candidate improved on the initial error */
	return (err != target);
}
753
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive walk like i9xx_find_best_dpll(), but using the
	 * Pineview clock equation and without the m1 > m2 cutoff
	 * (Pineview has a single combined m divider). */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one candidate improved on the initial error */
	return (err != target);
}
809
810 /*
811  * Returns a set of divisors for the desired target clock with the given
812  * refclk, or FALSE.  The returned values represent the clock equation:
813  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
814  *
815  * Target and reference clocks are specified in kHz.
816  *
817  * If match_clock is provided, then best_clock P divider must match the P
818  * divider from @match_clock used for LVDS downclocking.
819  */
820 static bool
821 g4x_find_best_dpll(const struct intel_limit *limit,
822                    struct intel_crtc_state *crtc_state,
823                    int target, int refclk, struct dpll *match_clock,
824                    struct dpll *best_clock)
825 {
826         struct drm_device *dev = crtc_state->uapi.crtc->dev;
827         struct dpll clock;
828         int max_n;
829         bool found = false;
830         /* approximately equals target * 0.00585 */
831         int err_most = (target >> 8) + (target >> 9);
832
833         memset(best_clock, 0, sizeof(*best_clock));
834
835         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
836
837         max_n = limit->n.max;
838         /* based on hardware requirement, prefer smaller n to precision */
839         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
840                 /* based on hardware requirement, prefere larger m1,m2 */
841                 for (clock.m1 = limit->m1.max;
842                      clock.m1 >= limit->m1.min; clock.m1--) {
843                         for (clock.m2 = limit->m2.max;
844                              clock.m2 >= limit->m2.min; clock.m2--) {
845                                 for (clock.p1 = limit->p1.max;
846                                      clock.p1 >= limit->p1.min; clock.p1--) {
847                                         int this_err;
848
849                                         i9xx_calc_dpll_params(refclk, &clock);
850                                         if (!intel_PLL_is_valid(to_i915(dev),
851                                                                 limit,
852                                                                 &clock))
853                                                 continue;
854
855                                         this_err = abs(clock.dot - target);
856                                         if (this_err < err_most) {
857                                                 *best_clock = clock;
858                                                 err_most = this_err;
859                                                 max_n = clock.n;
860                                                 found = true;
861                                         }
862                                 }
863                         }
864                 }
865         }
866         return found;
867 }
868
869 /*
870  * Check if the calculated PLL configuration is more optimal compared to the
871  * best configuration and error found so far. Return the calculated error.
872  */
873 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
874                                const struct dpll *calculated_clock,
875                                const struct dpll *best_clock,
876                                unsigned int best_error_ppm,
877                                unsigned int *error_ppm)
878 {
879         /*
880          * For CHV ignore the error and consider only the P value.
881          * Prefer a bigger P value based on HW requirements.
882          */
883         if (IS_CHERRYVIEW(to_i915(dev))) {
884                 *error_ppm = 0;
885
886                 return calculated_clock->p > best_clock->p;
887         }
888
889         if (WARN_ON_ONCE(!target_freq))
890                 return false;
891
892         *error_ppm = div_u64(1000000ULL *
893                                 abs(target_freq - calculated_clock->dot),
894                              target_freq);
895         /*
896          * Prefer a better P value over a better (smaller) error if the error
897          * is small. Ensure this preference for future configurations too by
898          * setting the error to 0.
899          */
900         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
901                 *error_ppm = 0;
902
903                 return true;
904         }
905
906         return *error_ppm + 10 < best_error_ppm;
907 }
908
909 /*
910  * Returns a set of divisors for the desired target clock with the given
911  * refclk, or FALSE.  The returned values represent the clock equation:
912  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
913  */
914 static bool
915 vlv_find_best_dpll(const struct intel_limit *limit,
916                    struct intel_crtc_state *crtc_state,
917                    int target, int refclk, struct dpll *match_clock,
918                    struct dpll *best_clock)
919 {
920         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
921         struct drm_device *dev = crtc->base.dev;
922         struct dpll clock;
923         unsigned int bestppm = 1000000;
924         /* min update 19.2 MHz */
925         int max_n = min(limit->n.max, refclk / 19200);
926         bool found = false;
927
928         target *= 5; /* fast clock */
929
930         memset(best_clock, 0, sizeof(*best_clock));
931
932         /* based on hardware requirement, prefer smaller n to precision */
933         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
934                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
935                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
936                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
937                                 clock.p = clock.p1 * clock.p2;
938                                 /* based on hardware requirement, prefer bigger m1,m2 values */
939                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
940                                         unsigned int ppm;
941
942                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
943                                                                      refclk * clock.m1);
944
945                                         vlv_calc_dpll_params(refclk, &clock);
946
947                                         if (!intel_PLL_is_valid(to_i915(dev),
948                                                                 limit,
949                                                                 &clock))
950                                                 continue;
951
952                                         if (!vlv_PLL_is_optimal(dev, target,
953                                                                 &clock,
954                                                                 best_clock,
955                                                                 bestppm, &ppm))
956                                                 continue;
957
958                                         *best_clock = clock;
959                                         bestppm = ppm;
960                                         found = true;
961                                 }
962                         }
963                 }
964         }
965
966         return found;
967 }
968
969 /*
970  * Returns a set of divisors for the desired target clock with the given
971  * refclk, or FALSE.  The returned values represent the clock equation:
972  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
973  */
974 static bool
975 chv_find_best_dpll(const struct intel_limit *limit,
976                    struct intel_crtc_state *crtc_state,
977                    int target, int refclk, struct dpll *match_clock,
978                    struct dpll *best_clock)
979 {
980         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
981         struct drm_device *dev = crtc->base.dev;
982         unsigned int best_error_ppm;
983         struct dpll clock;
984         u64 m2;
985         int found = false;
986
987         memset(best_clock, 0, sizeof(*best_clock));
988         best_error_ppm = 1000000;
989
990         /*
991          * Based on hardware doc, the n always set to 1, and m1 always
992          * set to 2.  If requires to support 200Mhz refclk, we need to
993          * revisit this because n may not 1 anymore.
994          */
995         clock.n = 1, clock.m1 = 2;
996         target *= 5;    /* fast clock */
997
998         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
999                 for (clock.p2 = limit->p2.p2_fast;
1000                                 clock.p2 >= limit->p2.p2_slow;
1001                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1002                         unsigned int error_ppm;
1003
1004                         clock.p = clock.p1 * clock.p2;
1005
1006                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1007                                                    refclk * clock.m1);
1008
1009                         if (m2 > INT_MAX/clock.m1)
1010                                 continue;
1011
1012                         clock.m2 = m2;
1013
1014                         chv_calc_dpll_params(refclk, &clock);
1015
1016                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1017                                 continue;
1018
1019                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1020                                                 best_error_ppm, &error_ppm))
1021                                 continue;
1022
1023                         *best_clock = clock;
1024                         best_error_ppm = error_ppm;
1025                         found = true;
1026                 }
1027         }
1028
1029         return found;
1030 }
1031
1032 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1033                         struct dpll *best_clock)
1034 {
1035         int refclk = 100000;
1036         const struct intel_limit *limit = &intel_limits_bxt;
1037
1038         return chv_find_best_dpll(limit, crtc_state,
1039                                   crtc_state->port_clock, refclk,
1040                                   NULL, best_clock);
1041 }
1042
bool intel_crtc_active(struct intel_crtc *crtc)
{
        /* Be paranoid as we can arrive here with only partial
         * state retrieved from the hardware during setup.
         *
         * We can ditch the adjusted_mode.crtc_clock check as soon
         * as Haswell has gained clock readout/fastboot support.
         *
         * We can ditch the crtc->primary->state->fb check as soon as we can
         * properly reconstruct framebuffers.
         *
         * FIXME: The intel_crtc->active here should be switched to
         * crtc->state->active once we have proper CRTC states wired up
         * for atomic.
         */
        /* && short-circuits: the fb/clock lookups only run when crtc->active. */
        return crtc->active && crtc->base.primary->state->fb &&
                crtc->config->hw.adjusted_mode.crtc_clock;
}
1061
1062 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1063                                              enum pipe pipe)
1064 {
1065         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1066
1067         return crtc->config->cpu_transcoder;
1068 }
1069
/*
 * Sample the pipe's scanline counter twice, 5ms apart; a changed value
 * means the pipe is actively scanning out.
 */
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        i915_reg_t reg = PIPEDSL(pipe);
        u32 line1, line2;
        u32 line_mask;

        /* Gen2 has a narrower scanline counter field. */
        if (IS_GEN(dev_priv, 2))
                line_mask = DSL_LINEMASK_GEN2;
        else
                line_mask = DSL_LINEMASK_GEN3;

        line1 = I915_READ(reg) & line_mask;
        msleep(5);
        line2 = I915_READ(reg) & line_mask;

        return line1 != line2;
}
1088
/*
 * Wait up to 100ms for the pipe's scanline to be moving (@state == true)
 * or stopped (@state == false); log an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Wait for the display line to settle/start moving */
        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
                DRM_ERROR("pipe %c scanline %s wait timed out\n",
                          pipe_name(pipe), onoff(state));
}
1099
/* Wait until the pipe's scanline counter has stopped advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, false);
}
1104
/* Wait until the pipe's scanline counter starts advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, true);
}
1109
/* Wait for a pipe that is being disabled to actually turn off. */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (INTEL_GEN(dev_priv) >= 4) {
                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_de_wait_for_clear(dev_priv, reg,
                                            I965_PIPECONF_ACTIVE, 100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                /*
                 * Pre-gen4 has no pipe-state bit in PIPECONF; infer
                 * shutdown from a stationary scanline counter instead.
                 */
                intel_wait_for_pipe_scanline_stopped(crtc);
        }
}
1128
/* Only for pre-ILK configs */
/* Warn unless the DPLL VCO enable state for @pipe matches @state. */
void assert_pll(struct drm_i915_private *dev_priv,
                enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(DPLL(pipe));
        cur_state = !!(val & DPLL_VCO_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1142
/* XXX: the dsi pll is shared between MIPI DSI ports */
/* Warn unless the DSI PLL VCO enable state matches @state. */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        /* DSI PLL control is read via the CCK sideband, bracketed by get/put. */
        vlv_cck_get(dev_priv);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        vlv_cck_put(dev_priv);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1158
/* Warn unless the FDI TX enable state for @pipe matches @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1178 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1179 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1180
1181 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1182                           enum pipe pipe, bool state)
1183 {
1184         u32 val;
1185         bool cur_state;
1186
1187         val = I915_READ(FDI_RX_CTL(pipe));
1188         cur_state = !!(val & FDI_RX_ENABLE);
1189         I915_STATE_WARN(cur_state != state,
1190              "FDI RX state assertion failure (expected %s, current %s)\n",
1191                         onoff(state), onoff(cur_state));
1192 }
1193 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1194 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1195
/* Warn if the FDI TX PLL is disabled on platforms that require it on. */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (IS_GEN(dev_priv, 5))
                return;

        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
        if (HAS_DDI(dev_priv))
                return;

        val = I915_READ(FDI_TX_CTL(pipe));
        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1212
1213 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1214                        enum pipe pipe, bool state)
1215 {
1216         u32 val;
1217         bool cur_state;
1218
1219         val = I915_READ(FDI_RX_CTL(pipe));
1220         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1221         I915_STATE_WARN(cur_state != state,
1222              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1223                         onoff(state), onoff(cur_state));
1224 }
1225
/*
 * Warn if the panel power sequencer has @pipe's registers locked while
 * panel power is on; PLL/pipe registers cannot be written in that state.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;

        /* DDI platforms have no panel register lock to check. */
        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* Map the PP port select back to the pipe driving the panel. */
                switch (port_sel) {
                case PANEL_PORT_SELECT_LVDS:
                        intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPA:
                        intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPC:
                        intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPD:
                        intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
                        break;
                default:
                        MISSING_CASE(port_sel);
                        break;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* Non-PCH, non-VLV/CHV hardware only drives panels over LVDS. */
                WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
                intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }

        val = I915_READ(pp_reg);
        /* Regs are writable when panel power is off or explicitly unlocked. */
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
1282
/*
 * Warn unless the pipe enable state matches @state; an unpowered
 * transcoder power domain is treated as "pipe disabled".
 */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;

        /* Only read PIPECONF if its power domain is actually powered. */
        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wakeref) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain, wakeref);
        } else {
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
1311
/* Warn unless the plane's hardware enable state matches @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
        enum pipe pipe;
        bool cur_state;

        cur_state = plane->get_hw_state(plane, &pipe);

        I915_STATE_WARN(cur_state != state,
                        "%s assertion failure (expected %s, current %s)\n",
                        plane->base.name, onoff(state), onoff(cur_state));
}
1323
1324 #define assert_plane_enabled(p) assert_plane(p, true)
1325 #define assert_plane_disabled(p) assert_plane(p, false)
1326
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_plane *plane;

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
                assert_plane_disabled(plane);
}
1335
/*
 * Warn if vblank interrupts are still enabled on @crtc: a successful
 * drm_crtc_vblank_get() (return 0) indicates they are, in which case the
 * reference just taken must be dropped again.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1341
1342 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1343                                     enum pipe pipe)
1344 {
1345         u32 val;
1346         bool enabled;
1347
1348         val = I915_READ(PCH_TRANSCONF(pipe));
1349         enabled = !!(val & TRANS_ENABLE);
1350         I915_STATE_WARN(enabled,
1351              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1352              pipe_name(pipe));
1353 }
1354
/* Warn if a PCH DP port is enabled on @pipe, or stuck on IBX transcoder B. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, enum port port,
                                   i915_reg_t dp_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH DP %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        /* IBX quirk: even a disabled port must not select transcoder B. */
        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH DP %c still using transcoder B\n",
                        port_name(port));
}
1372
/* Warn if a PCH HDMI port is enabled on @pipe, or stuck on IBX transcoder B. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum port port,
                                     i915_reg_t hdmi_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        /* IBX quirk: even a disabled port must not select transcoder B. */
        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH HDMI %c still using transcoder B\n",
                        port_name(port));
}
1390
/* Assert that no PCH port (DP, HDMI, VGA, LVDS) is routed to @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        enum pipe port_pipe;

        assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

        I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH VGA enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        /* PCH SDVOB multiplex with HDMIB */
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1415
/* Low-level VLV DPLL enable: program the DPLL and wait for it to lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        /* Flush the write, then give the PLL time before checking lock. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1429
/* Enable the VLV DPLL for @crtc and program DPLL_MD. */
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        /* Only spin up the VCO when the state actually enables it. */
        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1447
1448
/* Low-level CHV DPLL enable: ungate dclkp, enable the PLL, wait for lock. */
static void _chv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        vlv_dpio_get(dev_priv);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        vlv_dpio_put(dev_priv);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1478
/*
 * Enable the CHV DPLL for @crtc; pipes B/C need the chicken-bit route
 * for DPLL_MD (WaPixelRepeatModeFixForC0).
 */
static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        /* Only spin up the VCO when the state actually enables it. */
        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _chv_enable_pll(crtc, pipe_config);

        if (pipe != PIPE_A) {
                /*
                 * WaPixelRepeatModeFixForC0:chv
                 *
                 * DPLLCMD is AWOL. Use chicken bits to propagate
                 * the value from DPLLBMD to either pipe B or C.
                 */
                I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
                I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
                I915_WRITE(CBR4_VLV, 0);
                dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

                /*
                 * DPLLB VGA mode also seems to cause problems.
                 * We should always have it disabled.
                 */
                WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
        } else {
                I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
                POSTING_READ(DPLL_MD(pipe));
        }
}
1515
1516 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1517 {
1518         if (IS_I830(dev_priv))
1519                 return false;
1520
1521         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1522 }
1523
/* Enable and warm up an i9xx-style DPLL per the programmed crtc state. */
static void i9xx_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc_state->dpll_hw_state.dpll;
        int i;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (i9xx_has_pps(dev_priv))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc_state->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        for (i = 0; i < 3; i++) {
                I915_WRITE(reg, dpll);
                POSTING_READ(reg);
                udelay(150); /* wait for warmup */
        }
}
1569
/*
 * Disable the DPLL of a gmch pipe, leaving only VGA mode disable set.
 * No-op on 830, where both pipes (and their PLLs) must stay enabled.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1586
/*
 * Disable the VLV DPLL for the given pipe while keeping the reference
 * clock enables (and, for pipes other than A, the integrated CRI
 * clock) asserted.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Keep the CRI clock running on pipes B/C */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1602
/*
 * Disable the CHV DPLL for the given pipe: keep the reference clock
 * bits asserted in the DPLL register, then turn off the 10bit clock
 * in the DPIO PHY common lane registers.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Keep the CRI clock running on pipes B/C */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1628
/*
 * Wait (up to 1000ms) for the PHY to report the given digital port as
 * ready, warning on timeout. Ports B/C are reported via DPLL(0), port
 * D via DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C's ready bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1660
/*
 * Enable the PCH transcoder for the given CRTC, mirroring the BPC and
 * interlace configuration from the CPU pipe's PIPECONF. The shared
 * DPLL and both FDI TX/RX must already be enabled.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Copy the interlace mode from the CPU pipe */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	/* wait for the transcoder to report the enabled state */
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1717
/*
 * Enable the single LPT PCH transcoder, copying the interlace mode
 * from the CPU transcoder's PIPECONF. FDI must already be running
 * (RX is hardwired to pipe A on LPT).
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Mirror the CPU pipe's interlace mode */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for the transcoder to report the enabled state */
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1746
/*
 * Disable the PCH transcoder for the given pipe and wait for it to
 * report off. FDI and all PCH ports on the pipe must already be
 * disabled.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1776
/*
 * Disable the single LPT PCH transcoder, wait for it to report off,
 * and clear the timing override workaround bit that was set when it
 * was enabled.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1794
1795 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1796 {
1797         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1798
1799         if (HAS_PCH_LPT(dev_priv))
1800                 return PIPE_A;
1801         else
1802                 return crtc->pipe;
1803 }
1804
1805 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1806 {
1807         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1808
1809         /*
1810          * On i965gm the hardware frame counter reads
1811          * zero when the TV encoder is enabled :(
1812          */
1813         if (IS_I965GM(dev_priv) &&
1814             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1815                 return 0;
1816
1817         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1818                 return 0xffffffff; /* full 32 bit counter */
1819         else if (INTEL_GEN(dev_priv) >= 3)
1820                 return 0xffffff; /* only 24 bits of frame count */
1821         else
1822                 return 0; /* Gen2 doesn't have a hardware frame counter */
1823 }
1824
1825 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1826 {
1827         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1828
1829         drm_crtc_set_max_vblank_count(&crtc->base,
1830                                       intel_crtc_max_vblank_count(crtc_state));
1831         drm_crtc_vblank_on(&crtc->base);
1832 }
1833
/*
 * Enable the pipe/transcoder for the given crtc state. All planes must
 * be disabled first, and the clock source feeding the pipe (pipe PLL,
 * DSI PLL, or FDI for PCH encoders) must already be running.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1891
/*
 * Disable the pipe/transcoder for the given (old) crtc state and wait
 * for it to stop. All planes must already be disabled. On 830 the pipe
 * is left enabled (only double wide is cleared).
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* already disabled, nothing to do */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* only wait for off if we actually disabled the pipe */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1931
/* GTT tile/page size in bytes: 2KiB on gen2, 4KiB on everything else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1936
/*
 * Return the width of one tile row in bytes for the given fb plane.
 * For linear fbs the whole tile/page size is returned (the "tile" is
 * one page wide).
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		/* the CCS aux plane (color_plane 1) uses 128 byte tiles */
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* the CCS aux plane (color_plane 1) uses 128 byte tiles */
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
1984
1985 static unsigned int
1986 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1987 {
1988         return intel_tile_size(to_i915(fb->dev)) /
1989                 intel_tile_width_bytes(fb, color_plane);
1990 }
1991
1992 /* Return the tile dimensions in pixel units */
1993 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1994                             unsigned int *tile_width,
1995                             unsigned int *tile_height)
1996 {
1997         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1998         unsigned int cpp = fb->format->cpp[color_plane];
1999
2000         *tile_width = tile_width_bytes / cpp;
2001         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
2002 }
2003
/* Round the fb plane height up to a whole number of tile rows. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2012
2013 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2014 {
2015         unsigned int size = 0;
2016         int i;
2017
2018         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2019                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2020
2021         return size;
2022 }
2023
2024 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2025 {
2026         unsigned int size = 0;
2027         int i;
2028
2029         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2030                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2031
2032         return size;
2033 }
2034
2035 static void
2036 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2037                         const struct drm_framebuffer *fb,
2038                         unsigned int rotation)
2039 {
2040         view->type = I915_GGTT_VIEW_NORMAL;
2041         if (drm_rotation_90_or_270(rotation)) {
2042                 view->type = I915_GGTT_VIEW_ROTATED;
2043                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2044         }
2045 }
2046
/* Per-platform base address alignment required for the cursor surface. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2058
/* Per-platform base address alignment required for linear scanout surfaces. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2071
/*
 * Return the GGTT base address alignment (in bytes) required to scan
 * out the given fb plane, based on the tiling modifier and platform.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (color_plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		/* Y/Yf tiled surfaces need 1MiB alignment */
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2098
2099 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2100 {
2101         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2102         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2103
2104         return INTEL_GEN(dev_priv) < 4 ||
2105                 (plane->has_fbc &&
2106                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2107 }
2108
/*
 * Pin an fb's GEM object into the GGTT (using the given view) for
 * scanout, and install a fence for it when requested and possible.
 *
 * Returns the pinned vma with an extra reference taken, or an ERR_PTR
 * on failure. PLANE_HAS_FENCE is OR'ed into *out_flags if a fence was
 * installed. The caller releases everything via intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	i915_gem_object_lock(obj);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 can't scan out without a fence: bail */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* take a reference for the caller; dropped by intel_unpin_fb_vma() */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	i915_gem_object_unlock(obj);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2203
/*
 * Release a scanout pin obtained from intel_pin_and_fence_fb_obj():
 * drop the fence if one was installed (PLANE_HAS_FENCE in flags),
 * unpin from the display plane, and drop our vma reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2214
2215 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2216                           unsigned int rotation)
2217 {
2218         if (drm_rotation_90_or_270(rotation))
2219                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2220         else
2221                 return fb->pitches[color_plane];
2222 }
2223
2224 /*
2225  * Convert the x/y offsets into a linear offset.
2226  * Only valid with 0/180 degree rotation, which is fine since linear
2227  * offset is only used with linear buffers on pre-hsw and tiled buffers
2228  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2229  */
2230 u32 intel_fb_xy_to_linear(int x, int y,
2231                           const struct intel_plane_state *state,
2232                           int color_plane)
2233 {
2234         const struct drm_framebuffer *fb = state->hw.fb;
2235         unsigned int cpp = fb->format->cpp[color_plane];
2236         unsigned int pitch = state->color_plane[color_plane].stride;
2237
2238         return y * pitch + x * cpp;
2239 }
2240
2241 /*
2242  * Add the x/y offsets derived from fb->offsets[] to the user
2243  * specified plane src x/y offsets. The resulting x/y offsets
2244  * specify the start of scanout from the beginning of the gtt mapping.
2245  */
2246 void intel_add_fb_offsets(int *x, int *y,
2247                           const struct intel_plane_state *state,
2248                           int color_plane)
2249
2250 {
2251         *x += state->color_plane[color_plane].x;
2252         *y += state->color_plane[color_plane].y;
2253 }
2254
2255 static u32 intel_adjust_tile_offset(int *x, int *y,
2256                                     unsigned int tile_width,
2257                                     unsigned int tile_height,
2258                                     unsigned int tile_size,
2259                                     unsigned int pitch_tiles,
2260                                     u32 old_offset,
2261                                     u32 new_offset)
2262 {
2263         unsigned int pitch_pixels = pitch_tiles * tile_width;
2264         unsigned int tiles;
2265
2266         WARN_ON(old_offset & (tile_size - 1));
2267         WARN_ON(new_offset & (tile_size - 1));
2268         WARN_ON(new_offset > old_offset);
2269
2270         tiles = (old_offset - new_offset) / tile_size;
2271
2272         *y += tiles / pitch_tiles * tile_height;
2273         *x += tiles % pitch_tiles * tile_width;
2274
2275         /* minimize x in case it got needlessly big */
2276         *y += *x / pitch_pixels * tile_height;
2277         *x %= pitch_pixels;
2278
2279         return new_offset;
2280 }
2281
/*
 * Whether the given fb plane is laid out linearly.
 * NOTE: color_plane is currently unused here.
 */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2286
/*
 * Move the difference between old_offset and new_offset (which must
 * not be larger) into the x/y offsets, returning new_offset as the
 * new base. Tiled layouts go through tile-granular math; linear
 * layouts use plain byte arithmetic.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile-height units */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* linear: convert the delta back to x/y directly */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2325
2326 /*
2327  * Adjust the tile offset by moving the difference into
2328  * the x/y offsets.
2329  */
2330 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2331                                              const struct intel_plane_state *state,
2332                                              int color_plane,
2333                                              u32 old_offset, u32 new_offset)
2334 {
2335         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2336                                            state->hw.rotation,
2337                                            state->color_plane[color_plane].stride,
2338                                            old_offset, new_offset);
2339 }
2340
2341 /*
2342  * Computes the aligned offset to the base tile and adjusts
2343  * x, y. bytes per pixel is assumed to be a power-of-two.
2344  *
2345  * In the 90/270 rotated case, x and y are assumed
2346  * to be already rotated to match the rotated GTT view, and
2347  * pitch is the tile_height aligned framebuffer height.
2348  *
2349  * This function is used when computing the derived information
2350  * under intel_framebuffer, so using any of that information
2351  * here is not allowed. Anything under drm_framebuffer can be
2352  * used. This is why the user has to pass in the pitch since it
2353  * is specified in the rotated orientation.
2354  */
2355 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2356                                         int *x, int *y,
2357                                         const struct drm_framebuffer *fb,
2358                                         int color_plane,
2359                                         unsigned int pitch,
2360                                         unsigned int rotation,
2361                                         u32 alignment)
2362 {
2363         unsigned int cpp = fb->format->cpp[color_plane];
2364         u32 offset, offset_aligned;
2365
2366         if (alignment)
2367                 alignment--;
2368
2369         if (!is_surface_linear(fb->modifier, color_plane)) {
2370                 unsigned int tile_size, tile_width, tile_height;
2371                 unsigned int tile_rows, tiles, pitch_tiles;
2372
2373                 tile_size = intel_tile_size(dev_priv);
2374                 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2375
2376                 if (drm_rotation_90_or_270(rotation)) {
2377                         pitch_tiles = pitch / tile_height;
2378                         swap(tile_width, tile_height);
2379                 } else {
2380                         pitch_tiles = pitch / (tile_width * cpp);
2381                 }
2382
2383                 tile_rows = *y / tile_height;
2384                 *y %= tile_height;
2385
2386                 tiles = *x / tile_width;
2387                 *x %= tile_width;
2388
2389                 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2390                 offset_aligned = offset & ~alignment;
2391
2392                 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2393                                          tile_size, pitch_tiles,
2394                                          offset, offset_aligned);
2395         } else {
2396                 offset = *y * pitch + *x * cpp;
2397                 offset_aligned = offset & ~alignment;
2398
2399                 *y = (offset & alignment) / pitch;
2400                 *x = ((offset & alignment) - *y * pitch) / cpp;
2401         }
2402
2403         return offset_aligned;
2404 }
2405
2406 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2407                                               const struct intel_plane_state *state,
2408                                               int color_plane)
2409 {
2410         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2411         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2412         const struct drm_framebuffer *fb = state->hw.fb;
2413         unsigned int rotation = state->hw.rotation;
2414         int pitch = state->color_plane[color_plane].stride;
2415         u32 alignment;
2416
2417         if (intel_plane->id == PLANE_CURSOR)
2418                 alignment = intel_cursor_alignment(dev_priv);
2419         else
2420                 alignment = intel_surf_alignment(fb, color_plane);
2421
2422         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2423                                             pitch, rotation, alignment);
2424 }
2425
2426 /* Convert the fb->offset[] into x/y offsets */
2427 static int intel_fb_offset_to_xy(int *x, int *y,
2428                                  const struct drm_framebuffer *fb,
2429                                  int color_plane)
2430 {
2431         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2432         unsigned int height;
2433
2434         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2435             fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2436                 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2437                               fb->offsets[color_plane], color_plane);
2438                 return -EINVAL;
2439         }
2440
2441         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2442         height = ALIGN(height, intel_tile_height(fb, color_plane));
2443
2444         /* Catch potential overflows early */
2445         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2446                             fb->offsets[color_plane])) {
2447                 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2448                               fb->offsets[color_plane], fb->pitches[color_plane],
2449                               color_plane);
2450                 return -ERANGE;
2451         }
2452
2453         *x = 0;
2454         *y = 0;
2455
2456         intel_adjust_aligned_offset(x, y,
2457                                     fb, color_plane, DRM_MODE_ROTATE_0,
2458                                     fb->pitches[color_plane],
2459                                     fb->offsets[color_plane], 0);
2460
2461         return 0;
2462 }
2463
2464 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2465 {
2466         switch (fb_modifier) {
2467         case I915_FORMAT_MOD_X_TILED:
2468                 return I915_TILING_X;
2469         case I915_FORMAT_MOD_Y_TILED:
2470         case I915_FORMAT_MOD_Y_TILED_CCS:
2471                 return I915_TILING_Y;
2472         default:
2473                 return I915_TILING_NONE;
2474         }
2475 }
2476
2477 /*
2478  * From the Sky Lake PRM:
2479  * "The Color Control Surface (CCS) contains the compression status of
2480  *  the cache-line pairs. The compression state of the cache-line pair
2481  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2482  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2483  *  cache-line-pairs. CCS is always Y tiled."
2484  *
2485  * Since cache line pairs refers to horizontally adjacent cache lines,
2486  * each cache line in the CCS corresponds to an area of 32x16 cache
2487  * lines on the main surface. Since each pixel is 4 bytes, this gives
2488  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2489  * main surface.
2490  */
2491 static const struct drm_format_info ccs_formats[] = {
2492         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2493           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2494         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2495           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2496         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2497           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2498         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2499           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2500 };
2501
2502 static const struct drm_format_info *
2503 lookup_format_info(const struct drm_format_info formats[],
2504                    int num_formats, u32 format)
2505 {
2506         int i;
2507
2508         for (i = 0; i < num_formats; i++) {
2509                 if (formats[i].format == format)
2510                         return &formats[i];
2511         }
2512
2513         return NULL;
2514 }
2515
2516 static const struct drm_format_info *
2517 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2518 {
2519         switch (cmd->modifier[0]) {
2520         case I915_FORMAT_MOD_Y_TILED_CCS:
2521         case I915_FORMAT_MOD_Yf_TILED_CCS:
2522                 return lookup_format_info(ccs_formats,
2523                                           ARRAY_SIZE(ccs_formats),
2524                                           cmd->pixel_format);
2525         default:
2526                 return NULL;
2527         }
2528 }
2529
2530 bool is_ccs_modifier(u64 modifier)
2531 {
2532         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2533                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2534 }
2535
2536 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2537                               u32 pixel_format, u64 modifier)
2538 {
2539         struct intel_crtc *crtc;
2540         struct intel_plane *plane;
2541
2542         /*
2543          * We assume the primary plane for pipe A has
2544          * the highest stride limits of them all.
2545          */
2546         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2547         if (!crtc)
2548                 return 0;
2549
2550         plane = to_intel_plane(crtc->base.primary);
2551
2552         return plane->max_stride(plane, pixel_format, modifier,
2553                                  DRM_MODE_ROTATE_0);
2554 }
2555
2556 static
2557 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2558                         u32 pixel_format, u64 modifier)
2559 {
2560         /*
2561          * Arbitrary limit for gen4+ chosen to match the
2562          * render engine max stride.
2563          *
2564          * The new CCS hash mode makes remapping impossible
2565          */
2566         if (!is_ccs_modifier(modifier)) {
2567                 if (INTEL_GEN(dev_priv) >= 7)
2568                         return 256*1024;
2569                 else if (INTEL_GEN(dev_priv) >= 4)
2570                         return 128*1024;
2571         }
2572
2573         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2574 }
2575
2576 static u32
2577 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2578 {
2579         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2580
2581         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2582                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2583                                                            fb->format->format,
2584                                                            fb->modifier);
2585
2586                 /*
2587                  * To make remapping with linear generally feasible
2588                  * we need the stride to be page aligned.
2589                  */
2590                 if (fb->pitches[color_plane] > max_stride)
2591                         return intel_tile_size(dev_priv);
2592                 else
2593                         return 64;
2594         } else {
2595                 return intel_tile_width_bytes(fb, color_plane);
2596         }
2597 }
2598
2599 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2600 {
2601         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2602         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2603         const struct drm_framebuffer *fb = plane_state->hw.fb;
2604         int i;
2605
2606         /* We don't want to deal with remapping with cursors */
2607         if (plane->id == PLANE_CURSOR)
2608                 return false;
2609
2610         /*
2611          * The display engine limits already match/exceed the
2612          * render engine limits, so not much point in remapping.
2613          * Would also need to deal with the fence POT alignment
2614          * and gen2 2KiB GTT tile size.
2615          */
2616         if (INTEL_GEN(dev_priv) < 4)
2617                 return false;
2618
2619         /*
2620          * The new CCS hash mode isn't compatible with remapping as
2621          * the virtual address of the pages affects the compressed data.
2622          */
2623         if (is_ccs_modifier(fb->modifier))
2624                 return false;
2625
2626         /* Linear needs a page aligned stride for remapping */
2627         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2628                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2629
2630                 for (i = 0; i < fb->format->num_planes; i++) {
2631                         if (fb->pitches[i] & alignment)
2632                                 return false;
2633                 }
2634         }
2635
2636         return true;
2637 }
2638
2639 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2640 {
2641         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2642         const struct drm_framebuffer *fb = plane_state->hw.fb;
2643         unsigned int rotation = plane_state->hw.rotation;
2644         u32 stride, max_stride;
2645
2646         /*
2647          * No remapping for invisible planes since we don't have
2648          * an actual source viewport to remap.
2649          */
2650         if (!plane_state->uapi.visible)
2651                 return false;
2652
2653         if (!intel_plane_can_remap(plane_state))
2654                 return false;
2655
2656         /*
2657          * FIXME: aux plane limits on gen9+ are
2658          * unclear in Bspec, for now no checking.
2659          */
2660         stride = intel_fb_pitch(fb, 0, rotation);
2661         max_stride = plane->max_stride(plane, fb->format->format,
2662                                        fb->modifier, rotation);
2663
2664         return stride > max_stride;
2665 }
2666
/*
 * Compute the derived per-color-plane layout of a framebuffer:
 * the normal-view x/y offsets, and for tiled fbs also the rotated
 * GTT view (rot_info + rotated x/y/pitch). Also validates CCS
 * alignment, fence compatibility, and that the fb fits inside the
 * backing object. Returns 0 on success or a negative errno.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Turn the fb byte offset into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* Plane 1 of a CCS fb is the aux (compression) surface. */
		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			/* Intra-tile offsets in main surface coordinates. */
			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Page (tile_size) aligned start of the plane, in tiles. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb->modifier, i)) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			/* Describe this plane in the rotated GTT view. */
			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* Linear: size in tiles from the last byte used. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* The whole layout must fit in the backing object. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2828
/*
 * Build a remapped (or rotated) GGTT view covering just this plane's
 * source viewport, and rewrite the plane state's color_plane[]
 * stride/x/y plus uapi.src coordinates to match that view. Used when
 * the fb stride exceeds the plane hardware limits (see
 * intel_plane_needs_remap()).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* Integer (pixel) part of the 16.16 fixed point src rectangle. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS fbs must never get here (intel_plane_can_remap() rejects them). */
	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* Chroma planes (i != 0) are subsampled by hsub/vsub. */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		/* Tile-aligned start of the viewport, in tiles. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		/* Planes are packed back to back in the remapped view. */
		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
2938
2939 static int
2940 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2941 {
2942         const struct intel_framebuffer *fb =
2943                 to_intel_framebuffer(plane_state->hw.fb);
2944         unsigned int rotation = plane_state->hw.rotation;
2945         int i, num_planes;
2946
2947         if (!fb)
2948                 return 0;
2949
2950         num_planes = fb->base.format->num_planes;
2951
2952         if (intel_plane_needs_remap(plane_state)) {
2953                 intel_plane_remap_gtt(plane_state);
2954
2955                 /*
2956                  * Sometimes even remapping can't overcome
2957                  * the stride limitations :( Can happen with
2958                  * big plane sizes and suitably misaligned
2959                  * offsets.
2960                  */
2961                 return intel_plane_check_stride(plane_state);
2962         }
2963
2964         intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2965
2966         for (i = 0; i < num_planes; i++) {
2967                 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2968                 plane_state->color_plane[i].offset = 0;
2969
2970                 if (drm_rotation_90_or_270(rotation)) {
2971                         plane_state->color_plane[i].x = fb->rotated[i].x;
2972                         plane_state->color_plane[i].y = fb->rotated[i].y;
2973                 } else {
2974                         plane_state->color_plane[i].x = fb->normal[i].x;
2975                         plane_state->color_plane[i].y = fb->normal[i].y;
2976                 }
2977         }
2978
2979         /* Rotate src coordinates to match rotated GTT view */
2980         if (drm_rotation_90_or_270(rotation))
2981                 drm_rect_rotate(&plane_state->uapi.src,
2982                                 fb->base.width << 16, fb->base.height << 16,
2983                                 DRM_MODE_ROTATE_270);
2984
2985         return intel_plane_check_stride(plane_state);
2986 }
2987
2988 static int i9xx_format_to_fourcc(int format)
2989 {
2990         switch (format) {
2991         case DISPPLANE_8BPP:
2992                 return DRM_FORMAT_C8;
2993         case DISPPLANE_BGRA555:
2994                 return DRM_FORMAT_ARGB1555;
2995         case DISPPLANE_BGRX555:
2996                 return DRM_FORMAT_XRGB1555;
2997         case DISPPLANE_BGRX565:
2998                 return DRM_FORMAT_RGB565;
2999         default:
3000         case DISPPLANE_BGRX888:
3001                 return DRM_FORMAT_XRGB8888;
3002         case DISPPLANE_RGBX888:
3003                 return DRM_FORMAT_XBGR8888;
3004         case DISPPLANE_BGRA888:
3005                 return DRM_FORMAT_ARGB8888;
3006         case DISPPLANE_RGBA888:
3007                 return DRM_FORMAT_ABGR8888;
3008         case DISPPLANE_BGRX101010:
3009                 return DRM_FORMAT_XRGB2101010;
3010         case DISPPLANE_RGBX101010:
3011                 return DRM_FORMAT_XBGR2101010;
3012         case DISPPLANE_BGRA101010:
3013                 return DRM_FORMAT_ARGB2101010;
3014         case DISPPLANE_RGBA101010:
3015                 return DRM_FORMAT_ABGR2101010;
3016         case DISPPLANE_RGBX161616:
3017                 return DRM_FORMAT_XBGR16161616F;
3018         }
3019 }
3020
3021 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3022 {
3023         switch (format) {
3024         case PLANE_CTL_FORMAT_RGB_565:
3025                 return DRM_FORMAT_RGB565;
3026         case PLANE_CTL_FORMAT_NV12:
3027                 return DRM_FORMAT_NV12;
3028         case PLANE_CTL_FORMAT_P010:
3029                 return DRM_FORMAT_P010;
3030         case PLANE_CTL_FORMAT_P012:
3031                 return DRM_FORMAT_P012;
3032         case PLANE_CTL_FORMAT_P016:
3033                 return DRM_FORMAT_P016;
3034         case PLANE_CTL_FORMAT_Y210:
3035                 return DRM_FORMAT_Y210;
3036         case PLANE_CTL_FORMAT_Y212:
3037                 return DRM_FORMAT_Y212;
3038         case PLANE_CTL_FORMAT_Y216:
3039                 return DRM_FORMAT_Y216;
3040         case PLANE_CTL_FORMAT_Y410:
3041                 return DRM_FORMAT_XVYU2101010;
3042         case PLANE_CTL_FORMAT_Y412:
3043                 return DRM_FORMAT_XVYU12_16161616;
3044         case PLANE_CTL_FORMAT_Y416:
3045                 return DRM_FORMAT_XVYU16161616;
3046         default:
3047         case PLANE_CTL_FORMAT_XRGB_8888:
3048                 if (rgb_order) {
3049                         if (alpha)
3050                                 return DRM_FORMAT_ABGR8888;
3051                         else
3052                                 return DRM_FORMAT_XBGR8888;
3053                 } else {
3054                         if (alpha)
3055                                 return DRM_FORMAT_ARGB8888;
3056                         else
3057                                 return DRM_FORMAT_XRGB8888;
3058                 }
3059         case PLANE_CTL_FORMAT_XRGB_2101010:
3060                 if (rgb_order) {
3061                         if (alpha)
3062                                 return DRM_FORMAT_ABGR2101010;
3063                         else
3064                                 return DRM_FORMAT_XBGR2101010;
3065                 } else {
3066                         if (alpha)
3067                                 return DRM_FORMAT_ARGB2101010;
3068                         else
3069                                 return DRM_FORMAT_XRGB2101010;
3070                 }
3071         case PLANE_CTL_FORMAT_XRGB_16161616F:
3072                 if (rgb_order) {
3073                         if (alpha)
3074                                 return DRM_FORMAT_ABGR16161616F;
3075                         else
3076                                 return DRM_FORMAT_XBGR16161616F;
3077                 } else {
3078                         if (alpha)
3079                                 return DRM_FORMAT_ARGB16161616F;
3080                         else
3081                                 return DRM_FORMAT_XRGB16161616F;
3082                 }
3083         }
3084 }
3085
/*
 * Try to wrap the firmware/BIOS-programmed framebuffer described by
 * @plane_config in a preallocated stolen-memory GEM object and an
 * intel_framebuffer, so the boot fb can be taken over flicker-free.
 *
 * Returns true if plane_config->fb was successfully initialized,
 * false if the BIOS fb cannot be reused (caller must then find a
 * shared fb or disable the plane).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        /* page-align the stolen-memory range occupied by the BIOS fb */
        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
        u32 size_aligned = round_up(plane_config->base + plane_config->size,
                                    PAGE_SIZE);
        struct drm_i915_gem_object *obj;
        bool ret = false;

        size_aligned -= base_aligned;

        if (plane_config->size == 0)
                return false;

        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
        if (size_aligned * 2 > dev_priv->stolen_usable_size)
                return false;

        /* only modifiers the takeover path knows how to describe */
        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
        case I915_FORMAT_MOD_Y_TILED:
                break;
        default:
                DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
                                 fb->modifier);
                return false;
        }

        obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
                                                             base_aligned,
                                                             base_aligned,
                                                             size_aligned);
        if (IS_ERR(obj))
                return false;

        /* mirror the BIOS tiling setup into the object's fence state */
        switch (plane_config->tiling) {
        case I915_TILING_NONE:
                break;
        case I915_TILING_X:
        case I915_TILING_Y:
                obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
                break;
        default:
                MISSING_CASE(plane_config->tiling);
                goto out;
        }

        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out;
        }


        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
        ret = true;
out:
        /*
         * Drop our local reference on both paths; on success the
         * framebuffer presumably holds its own reference to obj
         * (taken in intel_framebuffer_init()) -- confirm there.
         */
        i915_gem_object_put(obj);
        return ret;
}
3160
3161 static void
3162 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3163                         struct intel_plane_state *plane_state,
3164                         bool visible)
3165 {
3166         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3167
3168         plane_state->uapi.visible = visible;
3169
3170         if (visible)
3171                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3172         else
3173                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3174 }
3175
/* Rebuild crtc_state->active_planes from the uapi plane_mask. */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        struct drm_plane *plane;

        /*
         * Active_planes aliases if multiple "primary" or cursor planes
         * have been used on the same (or wrong) pipe. plane_mask uses
         * unique ids, hence we can use that to reconstruct active_planes.
         */
        crtc_state->active_planes = 0;

        drm_for_each_plane_mask(plane, &dev_priv->drm,
                                crtc_state->uapi.plane_mask)
                crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
3192
/*
 * Disable a plane outside of the atomic commit machinery (used during
 * initial hw readout/sanitization), clearing all software state that
 * tracks it so later atomic checks see a consistent picture.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
                      plane->base.base.id, plane->base.name,
                      crtc->base.base.id, crtc->base.name);

        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_active_planes(crtc_state);
        /* plane no longer contributes bandwidth or cdclk demands */
        crtc_state->data_rate[plane->id] = 0;
        crtc_state->min_cdclk[plane->id] = 0;

        if (plane->id == PLANE_PRIMARY)
                intel_pre_disable_primary_noatomic(&crtc->base);

        intel_disable_plane(plane, crtc_state);
}
3215
3216 static struct intel_frontbuffer *
3217 to_intel_frontbuffer(struct drm_framebuffer *fb)
3218 {
3219         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3220 }
3221
/*
 * Hook the BIOS-programmed framebuffer up to the primary plane of
 * @intel_crtc: either wrap it in a fresh GEM object, share an fb
 * already reconstructed for another crtc at the same GGTT offset,
 * or -- failing both -- disable the plane entirely.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        /* alloc failed: the intel_framebuffer shell is unused, free it */
        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* same GGTT address => same BIOS fb, reuse it */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->hw.fb;
                        drm_framebuffer_get(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);

        return;

valid_fb:
        intel_state->hw.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->hw.rotation);
        intel_state->color_plane[0].stride =
                intel_fb_pitch(fb, 0, intel_state->hw.rotation);

        /* pin the fb where the BIOS left it so scanout never stalls */
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb,
                                           &intel_state->view,
                                           intel_plane_uses_fence(intel_state),
                                           &intel_state->flags);
        if (IS_ERR(intel_state->vma)) {
                DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
                          intel_crtc->pipe, PTR_ERR(intel_state->vma));

                intel_state->vma = NULL;
                drm_framebuffer_put(fb);
                return;
        }

        intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

        /* full-fb src rect in 16.16 fixed point, unscaled 1:1 dst */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->uapi.src = drm_plane_state_src(plane_state);
        intel_state->uapi.dst = drm_plane_state_dest(plane_state);

        if (plane_config->tiling)
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        plane_state->crtc = &intel_crtc->base;
        intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

        /* mark the plane as scanning out of this fb for fb tracking */
        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &to_intel_frontbuffer(fb)->bits);
}
3327
3328 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3329                                int color_plane,
3330                                unsigned int rotation)
3331 {
3332         int cpp = fb->format->cpp[color_plane];
3333
3334         switch (fb->modifier) {
3335         case DRM_FORMAT_MOD_LINEAR:
3336         case I915_FORMAT_MOD_X_TILED:
3337                 /*
3338                  * Validated limit is 4k, but has 5k should
3339                  * work apart from the following features:
3340                  * - Ytile (already limited to 4k)
3341                  * - FP16 (already limited to 4k)
3342                  * - render compression (already limited to 4k)
3343                  * - KVMR sprite and cursor (don't care)
3344                  * - horizontal panning (TODO verify this)
3345                  * - pipe and plane scaling (TODO verify this)
3346                  */
3347                 if (cpp == 8)
3348                         return 4096;
3349                 else
3350                         return 5120;
3351         case I915_FORMAT_MOD_Y_TILED_CCS:
3352         case I915_FORMAT_MOD_Yf_TILED_CCS:
3353                 /* FIXME AUX plane? */
3354         case I915_FORMAT_MOD_Y_TILED:
3355         case I915_FORMAT_MOD_Yf_TILED:
3356                 if (cpp == 8)
3357                         return 2048;
3358                 else
3359                         return 4096;
3360         default:
3361                 MISSING_CASE(fb->modifier);
3362                 return 2048;
3363         }
3364 }
3365
3366 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3367                                int color_plane,
3368                                unsigned int rotation)
3369 {
3370         int cpp = fb->format->cpp[color_plane];
3371
3372         switch (fb->modifier) {
3373         case DRM_FORMAT_MOD_LINEAR:
3374         case I915_FORMAT_MOD_X_TILED:
3375                 if (cpp == 8)
3376                         return 4096;
3377                 else
3378                         return 5120;
3379         case I915_FORMAT_MOD_Y_TILED_CCS:
3380         case I915_FORMAT_MOD_Yf_TILED_CCS:
3381                 /* FIXME AUX plane? */
3382         case I915_FORMAT_MOD_Y_TILED:
3383         case I915_FORMAT_MOD_Yf_TILED:
3384                 if (cpp == 8)
3385                         return 2048;
3386                 else
3387                         return 5120;
3388         default:
3389                 MISSING_CASE(fb->modifier);
3390                 return 2048;
3391         }
3392 }
3393
/* Maximum source width on icl+; 5120 regardless of format/modifier. */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
                               int color_plane,
                               unsigned int rotation)
{
        return 5120;
}
3400
/* Maximum plane source height on skl through cnl. */
static int skl_max_plane_height(void)
{
        return 4096;
}
3405
/* Maximum plane source height on icl+. */
static int icl_max_plane_height(void)
{
        return 4320;
}
3410
/*
 * Try to move the CCS AUX surface offset backwards (in whole alignment
 * steps) until its implied x/y match the main surface's x/y at
 * (@main_x, @main_y, @main_offset).  The hw derives the AUX x/y from
 * the main surface, so they must agree exactly.
 *
 * Returns true (and updates color_plane[1]) if matching coordinates
 * were found, false otherwise.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
                                           int main_x, int main_y, u32 main_offset)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int aux_x = plane_state->color_plane[1].x;
        int aux_y = plane_state->color_plane[1].y;
        u32 aux_offset = plane_state->color_plane[1].offset;
        u32 alignment = intel_surf_alignment(fb, 1);

        /* walk the AUX offset down while it stays >= the main offset */
        while (aux_offset >= main_offset && aux_y <= main_y) {
                int x, y;

                if (aux_x == main_x && aux_y == main_y)
                        break;

                if (aux_offset == 0)
                        break;

                /* adjust in subsampled coordinates, then convert back */
                x = aux_x / hsub;
                y = aux_y / vsub;
                aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
                                                               aux_offset, aux_offset - alignment);
                aux_x = x * hsub + aux_x % hsub;
                aux_y = y * vsub + aux_y % vsub;
        }

        if (aux_x != main_x || aux_y != main_y)
                return false;

        plane_state->color_plane[1].offset = aux_offset;
        plane_state->color_plane[1].x = aux_x;
        plane_state->color_plane[1].y = aux_y;

        return true;
}
3448
/*
 * Validate and finalize the main (Y/RGB) surface for a skl+ plane:
 * check size limits, compute an aligned surface offset plus x/y
 * start, and reconcile it with the AUX surface constraints.
 *
 * Returns 0 on success, -EINVAL if no valid configuration exists.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        /* src rect is 16.16 fixed point; convert to whole pixels */
        int x = plane_state->uapi.src.x1 >> 16;
        int y = plane_state->uapi.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->uapi.src) >> 16;
        int h = drm_rect_height(&plane_state->uapi.src) >> 16;
        int max_width;
        int max_height;
        u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

        if (INTEL_GEN(dev_priv) >= 11)
                max_width = icl_max_plane_width(fb, 0, rotation);
        else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                max_width = glk_max_plane_width(fb, 0, rotation);
        else
                max_width = skl_max_plane_width(fb, 0, rotation);

        if (INTEL_GEN(dev_priv) >= 11)
                max_height = icl_max_plane_height();
        else
                max_height = skl_max_plane_height();

        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
        alignment = intel_surf_alignment(fb, 0);

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                           offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
                int cpp = fb->format->cpp[0];

                /* pull the offset back until x + w fits within the stride */
                while ((x + w) * cpp > plane_state->color_plane[0].stride) {
                        if (offset == 0) {
                                DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
                                return -EINVAL;
                        }

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }
        }

        /*
         * CCS AUX surface doesn't have its own x/y offsets, we must make sure
         * they match with the main surface x/y offsets.
         */
        if (is_ccs_modifier(fb->modifier)) {
                while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
                        if (offset == 0)
                                break;

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }

                if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
                        DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
                        return -EINVAL;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = x;
        plane_state->color_plane[0].y = y;

        /*
         * Put the final coordinates back so that the src
         * coordinate checks will see the right values.
         */
        drm_rect_translate_to(&plane_state->uapi.src,
                              x << 16, y << 16);

        return 0;
}
3545
/*
 * Validate and finalize the chroma (UV) surface of a semiplanar
 * (NV12-style) framebuffer.  Returns 0 on success, -EINVAL if the
 * chroma source rect exceeds the plane limits.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        int max_width = skl_max_plane_width(fb, 1, rotation);
        int max_height = 4096;
        /*
         * >> 17 == (>> 16) / 2: convert from 16.16 fixed point luma
         * coordinates to whole pixels in the 2x2-subsampled chroma plane.
         */
        int x = plane_state->uapi.src.x1 >> 17;
        int y = plane_state->uapi.src.y1 >> 17;
        int w = drm_rect_width(&plane_state->uapi.src) >> 17;
        int h = drm_rect_height(&plane_state->uapi.src) >> 17;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

        /* FIXME not quite sure how/if these apply to the chroma plane */
        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        plane_state->color_plane[1].offset = offset;
        plane_state->color_plane[1].x = x;
        plane_state->color_plane[1].y = y;

        return 0;
}
3574
/*
 * Compute the offset and x/y of the CCS AUX (compression control)
 * surface from the main surface source position, scaling by the
 * format's horizontal/vertical subsampling factors.  Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int src_x = plane_state->uapi.src.x1 >> 16;
        int src_y = plane_state->uapi.src.y1 >> 16;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        /* position in subsampled AUX coordinates */
        int x = src_x / hsub;
        int y = src_y / vsub;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, 1);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

        plane_state->color_plane[1].offset = offset;
        /* convert back to full-res coordinates, keeping the remainder */
        plane_state->color_plane[1].x = x * hsub + src_x % hsub;
        plane_state->color_plane[1].y = y * vsub + src_y % vsub;

        return 0;
}
3595
/*
 * Top-level skl+ plane surface check: compute the GTT view, then set
 * up the AUX surface (chroma or CCS) followed by the main surface,
 * since the main surface offset must stay below the AUX offset.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int ret;

        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
                return ret;

        if (!plane_state->uapi.visible)
                return 0;

        /*
         * Handle the AUX surface first since
         * the main surface setup depends on it.
         */
        if (drm_format_info_is_yuv_semiplanar(fb->format)) {
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else if (is_ccs_modifier(fb->modifier)) {
                ret = skl_check_ccs_aux_surface(plane_state);
                if (ret)
                        return ret;
        } else {
                /*
                 * No AUX surface: park the offset at a sentinel value,
                 * presumably so the main-surface "offset <= aux_offset"
                 * clamping never triggers -- confirm against
                 * skl_check_main_surface().
                 */
                plane_state->color_plane[1].offset = ~0xfff;
                plane_state->color_plane[1].x = 0;
                plane_state->color_plane[1].y = 0;
        }

        ret = skl_check_main_surface(plane_state);
        if (ret)
                return ret;

        return 0;
}
3632
3633 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
3634                              const struct intel_plane_state *plane_state,
3635                              unsigned int *num, unsigned int *den)
3636 {
3637         const struct drm_framebuffer *fb = plane_state->hw.fb;
3638         unsigned int cpp = fb->format->cpp[0];
3639
3640         /*
3641          * g4x bspec says 64bpp pixel rate can't exceed 80%
3642          * of cdclk when the sprite plane is enabled on the
3643          * same pipe. ilk/snb bspec says 64bpp pixel rate is
3644          * never allowed to exceed 80% of cdclk. Let's just go
3645          * with the ilk/snb limit always.
3646          */
3647         if (cpp == 8) {
3648                 *num = 10;
3649                 *den = 8;
3650         } else {
3651                 *num = 1;
3652                 *den = 1;
3653         }
3654 }
3655
3656 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
3657                                 const struct intel_plane_state *plane_state)
3658 {
3659         unsigned int pixel_rate;
3660         unsigned int num, den;
3661
3662         /*
3663          * Note that crtc_state->pixel_rate accounts for both
3664          * horizontal and vertical panel fitter downscaling factors.
3665          * Pre-HSW bspec tells us to only consider the horizontal
3666          * downscaling factor here. We ignore that and just consider
3667          * both for simplicity.
3668          */
3669         pixel_rate = crtc_state->pixel_rate;
3670
3671         i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
3672
3673         /* two pixels per clock with double wide pipe */
3674         if (crtc_state->double_wide)
3675                 den *= 2;
3676
3677         return DIV_ROUND_UP(pixel_rate * num, den);
3678 }
3679
3680 unsigned int
3681 i9xx_plane_max_stride(struct intel_plane *plane,
3682                       u32 pixel_format, u64 modifier,
3683                       unsigned int rotation)
3684 {
3685         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3686
3687         if (!HAS_GMCH(dev_priv)) {
3688                 return 32*1024;
3689         } else if (INTEL_GEN(dev_priv) >= 4) {
3690                 if (modifier == I915_FORMAT_MOD_X_TILED)
3691                         return 16*1024;
3692                 else
3693                         return 32*1024;
3694         } else if (INTEL_GEN(dev_priv) >= 3) {
3695                 if (modifier == I915_FORMAT_MOD_X_TILED)
3696                         return 8*1024;
3697                 else
3698                         return 16*1024;
3699         } else {
3700                 if (plane->i9xx_plane == PLANE_C)
3701                         return 4*1024;
3702                 else
3703                         return 8*1024;
3704         }
3705 }
3706
3707 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3708 {
3709         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3710         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3711         u32 dspcntr = 0;
3712
3713         if (crtc_state->gamma_enable)
3714                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3715
3716         if (crtc_state->csc_enable)
3717                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3718
3719         if (INTEL_GEN(dev_priv) < 5)
3720                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3721
3722         return dspcntr;
3723 }
3724
/*
 * Compute the DSPCNTR value for a pre-skl primary plane from the
 * plane state: pixel format, tiling and rotation/reflection bits.
 * Returns 0 (plane effectively disabled) for an unexpected format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        u32 dspcntr;

        dspcntr = DISPLAY_PLANE_ENABLE;

        /* trickle feed workaround on the platforms that need it */
        if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
            IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

        /* map DRM fourcc to the hw pixel format field */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                dspcntr |= DISPPLANE_8BPP;
                break;
        case DRM_FORMAT_XRGB1555:
                dspcntr |= DISPPLANE_BGRX555;
                break;
        case DRM_FORMAT_ARGB1555:
                dspcntr |= DISPPLANE_BGRA555;
                break;
        case DRM_FORMAT_RGB565:
                dspcntr |= DISPPLANE_BGRX565;
                break;
        case DRM_FORMAT_XRGB8888:
                dspcntr |= DISPPLANE_BGRX888;
                break;
        case DRM_FORMAT_XBGR8888:
                dspcntr |= DISPPLANE_RGBX888;
                break;
        case DRM_FORMAT_ARGB8888:
                dspcntr |= DISPPLANE_BGRA888;
                break;
        case DRM_FORMAT_ABGR8888:
                dspcntr |= DISPPLANE_RGBA888;
                break;
        case DRM_FORMAT_XRGB2101010:
                dspcntr |= DISPPLANE_BGRX101010;
                break;
        case DRM_FORMAT_XBGR2101010:
                dspcntr |= DISPPLANE_RGBX101010;
                break;
        case DRM_FORMAT_ARGB2101010:
                dspcntr |= DISPPLANE_BGRA101010;
                break;
        case DRM_FORMAT_ABGR2101010:
                dspcntr |= DISPPLANE_RGBA101010;
                break;
        case DRM_FORMAT_XBGR16161616F:
                dspcntr |= DISPPLANE_RGBX161616;
                break;
        default:
                /* format should have been rejected earlier in .atomic_check */
                MISSING_CASE(fb->format->format);
                return 0;
        }

        /* X-tiled scanout supported from gen4 onwards */
        if (INTEL_GEN(dev_priv) >= 4 &&
            fb->modifier == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;

        if (rotation & DRM_MODE_ROTATE_180)
                dspcntr |= DISPPLANE_ROTATE_180;

        if (rotation & DRM_MODE_REFLECT_X)
                dspcntr |= DISPPLANE_MIRROR;

        return dspcntr;
}
3797
3798 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3799 {
3800         struct drm_i915_private *dev_priv =
3801                 to_i915(plane_state->uapi.plane->dev);
3802         const struct drm_framebuffer *fb = plane_state->hw.fb;
3803         int src_x, src_y, src_w;
3804         u32 offset;
3805         int ret;
3806
3807         ret = intel_plane_compute_gtt(plane_state);
3808         if (ret)
3809                 return ret;
3810
3811         if (!plane_state->uapi.visible)
3812                 return 0;
3813
3814         src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3815         src_x = plane_state->uapi.src.x1 >> 16;
3816         src_y = plane_state->uapi.src.y1 >> 16;
3817
3818         /* Undocumented hardware limit on i965/g4x/vlv/chv */
3819         if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
3820                 return -EINVAL;
3821
3822         intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3823
3824         if (INTEL_GEN(dev_priv) >= 4)
3825                 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3826                                                             plane_state, 0);
3827         else
3828                 offset = 0;
3829
3830         /*
3831          * Put the final coordinates back so that the src
3832          * coordinate checks will see the right values.
3833          */
3834         drm_rect_translate_to(&plane_state->uapi.src,
3835                               src_x << 16, src_y << 16);
3836
3837         /* HSW/BDW do this automagically in hardware */
3838         if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3839                 unsigned int rotation = plane_state->hw.rotation;
3840                 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3841                 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
3842
3843                 if (rotation & DRM_MODE_ROTATE_180) {
3844                         src_x += src_w - 1;
3845                         src_y += src_h - 1;
3846                 } else if (rotation & DRM_MODE_REFLECT_X) {
3847                         src_x += src_w - 1;
3848                 }
3849         }
3850
3851         plane_state->color_plane[0].offset = offset;
3852         plane_state->color_plane[0].x = src_x;
3853         plane_state->color_plane[0].y = src_y;
3854
3855         return 0;
3856 }
3857
3858 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
3859 {
3860         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3861         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3862
3863         if (IS_CHERRYVIEW(dev_priv))
3864                 return i9xx_plane == PLANE_B;
3865         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3866                 return false;
3867         else if (IS_GEN(dev_priv, 4))
3868                 return i9xx_plane == PLANE_C;
3869         else
3870                 return i9xx_plane == PLANE_B ||
3871                         i9xx_plane == PLANE_C;
3872 }
3873
3874 static int
3875 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3876                  struct intel_plane_state *plane_state)
3877 {
3878         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3879         int ret;
3880
3881         ret = chv_plane_check_rotation(plane_state);
3882         if (ret)
3883                 return ret;
3884
3885         ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
3886                                                   &crtc_state->uapi,
3887                                                   DRM_PLANE_HELPER_NO_SCALING,
3888                                                   DRM_PLANE_HELPER_NO_SCALING,
3889                                                   i9xx_plane_has_windowing(plane),
3890                                                   true);
3891         if (ret)
3892                 return ret;
3893
3894         ret = i9xx_check_plane_surface(plane_state);
3895         if (ret)
3896                 return ret;
3897
3898         if (!plane_state->uapi.visible)
3899                 return 0;
3900
3901         ret = intel_plane_check_src_coordinates(plane_state);
3902         if (ret)
3903                 return ret;
3904
3905         plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3906
3907         return 0;
3908 }
3909
/*
 * Program the pre-skl primary plane registers from a fully computed
 * plane state. All writes happen under the uncore lock, with DSPCNTR
 * written immediately before the surface/address register to make the
 * plane enable as atomic as the hardware allows.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine the precomputed plane bits with the crtc-dependent ones. */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 programs DSPADDR with the linear offset instead of DSPSURF. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own windowing registers. */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3980
/*
 * Disable a pre-skl primary plane while still programming the
 * crtc-dependent DSPCNTR bits (pipe gamma/csc), which affect the
 * pipe bottom color even with the plane off.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Control first, then clear the surface/start address. */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4011
4012 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
4013                                     enum pipe *pipe)
4014 {
4015         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4016         enum intel_display_power_domain power_domain;
4017         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4018         intel_wakeref_t wakeref;
4019         bool ret;
4020         u32 val;
4021
4022         /*
4023          * Not 100% correct for planes that can move between pipes,
4024          * but that's only the case for gen2-4 which don't have any
4025          * display power wells.
4026          */
4027         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
4028         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4029         if (!wakeref)
4030                 return false;
4031
4032         val = I915_READ(DSPCNTR(i9xx_plane));
4033
4034         ret = val & DISPLAY_PLANE_ENABLE;
4035
4036         if (INTEL_GEN(dev_priv) >= 5)
4037                 *pipe = plane->pipe;
4038         else
4039                 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
4040                         DISPPLANE_SEL_PIPE_SHIFT;
4041
4042         intel_display_power_put(dev_priv, power_domain, wakeref);
4043
4044         return ret;
4045 }
4046
4047 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4048 {
4049         struct drm_device *dev = intel_crtc->base.dev;
4050         struct drm_i915_private *dev_priv = to_i915(dev);
4051
4052         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4053         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4054         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4055 }
4056
4057 /*
4058  * This function detaches (aka. unbinds) unused scalers in hardware
4059  */
4060 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4061 {
4062         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4063         const struct intel_crtc_scaler_state *scaler_state =
4064                 &crtc_state->scaler_state;
4065         int i;
4066
4067         /* loop through and disable scalers that aren't in use */
4068         for (i = 0; i < intel_crtc->num_scalers; i++) {
4069                 if (!scaler_state->scalers[i].in_use)
4070                         skl_detach_scaler(intel_crtc, i);
4071         }
4072 }
4073
4074 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
4075                                           int color_plane, unsigned int rotation)
4076 {
4077         /*
4078          * The stride is either expressed as a multiple of 64 bytes chunks for
4079          * linear buffers or in number of tiles for tiled buffers.
4080          */
4081         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
4082                 return 64;
4083         else if (drm_rotation_90_or_270(rotation))
4084                 return intel_tile_height(fb, color_plane);
4085         else
4086                 return intel_tile_width_bytes(fb, color_plane);
4087 }
4088
4089 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4090                      int color_plane)
4091 {
4092         const struct drm_framebuffer *fb = plane_state->hw.fb;
4093         unsigned int rotation = plane_state->hw.rotation;
4094         u32 stride = plane_state->color_plane[color_plane].stride;
4095
4096         if (color_plane >= fb->format->num_planes)
4097                 return 0;
4098
4099         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4100 }
4101
/*
 * Translate a DRM fourcc into the skl+ PLANE_CTL source pixel format
 * field (plus the RGBX channel-order bit where needed). Returns 0 and
 * logs a MISSING_CASE for formats without a mapping here.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4161
4162 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4163 {
4164         if (!plane_state->hw.fb->format->has_alpha)
4165                 return PLANE_CTL_ALPHA_DISABLE;
4166
4167         switch (plane_state->hw.pixel_blend_mode) {
4168         case DRM_MODE_BLEND_PIXEL_NONE:
4169                 return PLANE_CTL_ALPHA_DISABLE;
4170         case DRM_MODE_BLEND_PREMULTI:
4171                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4172         case DRM_MODE_BLEND_COVERAGE:
4173                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4174         default:
4175                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4176                 return PLANE_CTL_ALPHA_DISABLE;
4177         }
4178 }
4179
4180 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4181 {
4182         if (!plane_state->hw.fb->format->has_alpha)
4183                 return PLANE_COLOR_ALPHA_DISABLE;
4184
4185         switch (plane_state->hw.pixel_blend_mode) {
4186         case DRM_MODE_BLEND_PIXEL_NONE:
4187                 return PLANE_COLOR_ALPHA_DISABLE;
4188         case DRM_MODE_BLEND_PREMULTI:
4189                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4190         case DRM_MODE_BLEND_COVERAGE:
4191                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4192         default:
4193                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4194                 return PLANE_COLOR_ALPHA_DISABLE;
4195         }
4196 }
4197
/*
 * Translate a framebuffer modifier into the skl+ PLANE_CTL tiling
 * field (plus render decompression enable for the CCS modifiers).
 * Linear maps to 0; unknown modifiers return 0 with a MISSING_CASE.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4219
/*
 * Translate the DRM rotation value into the skl+ PLANE_CTL rotation
 * field. Returns 0 (no rotation) and logs a MISSING_CASE for values
 * without a mapping.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
4241
/*
 * Translate the DRM reflection bits into the gen10+ PLANE_CTL flip
 * field. Only horizontal flip has an encoding here; DRM_MODE_REFLECT_Y
 * deliberately falls through to the MISSING_CASE complaint.
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
		/* no PLANE_CTL encoding for Y reflection; fall through */
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}
4256
4257 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4258 {
4259         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4260         u32 plane_ctl = 0;
4261
4262         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4263                 return plane_ctl;
4264
4265         if (crtc_state->gamma_enable)
4266                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4267
4268         if (crtc_state->csc_enable)
4269                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4270
4271         return plane_ctl;
4272 }
4273
/*
 * Compute the plane-dependent bits of PLANE_CTL for skl+.
 * The crtc-dependent bits are added separately via skl_plane_ctl_crtc().
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* Pre-glk the alpha/gamma/YUV csc controls still live in PLANE_CTL. */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Reflection bits are only programmed on gen10+. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4312
4313 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4314 {
4315         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4316         u32 plane_color_ctl = 0;
4317
4318         if (INTEL_GEN(dev_priv) >= 11)
4319                 return plane_color_ctl;
4320
4321         if (crtc_state->gamma_enable)
4322                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4323
4324         if (crtc_state->csc_enable)
4325                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4326
4327         return plane_color_ctl;
4328 }
4329
/*
 * Compute the plane-dependent bits of PLANE_COLOR_CTL for glk+.
 * The crtc-dependent bits are added separately via
 * glk_plane_color_ctl_crtc().
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		/* Fixed-function YUV->RGB csc mode for non-HDR planes. */
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* icl HDR planes: enable the input csc instead — presumably
		 * programmed elsewhere; confirm against the plane code. */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4356
/*
 * Re-program the display hardware state after a reset and, if a
 * duplicated atomic state was saved beforehand, recommit it.
 *
 * Returns 0, or a negative error code from the commit. -EDEADLK is
 * unexpected here and triggers the WARN below.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	/* Nothing to recommit when no state was duplicated before the reset. */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
4395
4396 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4397 {
4398         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4399                 intel_has_gpu_reset(&dev_priv->gt));
4400 }
4401
/*
 * Prepare the display for a GPU reset that may clobber it: save the
 * current atomic state in dev_priv->modeset_restore_state and disable
 * all crtcs.
 *
 * Takes mode_config.mutex and all modeset locks and deliberately
 * leaves them held on return (even on the error paths below);
 * intel_finish_reset() restores the state and drops the locks.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		/* Locks intentionally stay held for intel_finish_reset(). */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		/* Locks intentionally stay held for intel_finish_reset(). */
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4458
/*
 * Counterpart of intel_prepare_reset(): restore the display state
 * saved before the GPU reset and drop the modeset locks taken there.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	/* Claim ownership of the saved state (clears the pointer). */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* HPD irq setup runs under the irq spinlock. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4509
4510 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4511 {
4512         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4513         enum pipe pipe = crtc->pipe;
4514         u32 tmp;
4515
4516         tmp = I915_READ(PIPE_CHICKEN(pipe));
4517
4518         /*
4519          * Display WA #1153: icl
4520          * enable hardware to bypass the alpha math
4521          * and rounding for per-pixel values 00 and 0xff
4522          */
4523         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4524         /*
4525          * Display WA # 1605353570: icl
4526          * Set the pixel rounding bit to 1 for allowing
4527          * passthrough of Frame buffer pixels unmodified
4528          * across pipe
4529          */
4530         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4531         I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4532 }
4533
4534 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
4535 {
4536         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4537         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4538         u32 trans_ddi_func_ctl2_val;
4539         u8 master_select;
4540
4541         /*
4542          * Configure the master select and enable Transcoder Port Sync for
4543          * Slave CRTCs transcoder.
4544          */
4545         if (crtc_state->master_transcoder == INVALID_TRANSCODER)
4546                 return;
4547
4548         if (crtc_state->master_transcoder == TRANSCODER_EDP)
4549                 master_select = 0;
4550         else
4551                 master_select = crtc_state->master_transcoder + 1;
4552
4553         /* Set the master select bits for Tranascoder Port Sync */
4554         trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
4555                                    PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
4556                 PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
4557         /* Enable Transcoder Port Sync */
4558         trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
4559
4560         I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
4561                    trans_ddi_func_ctl2_val);
4562 }
4563
4564 static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
4565 {
4566         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4567         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4568         i915_reg_t reg;
4569         u32 trans_ddi_func_ctl2_val;
4570
4571         if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
4572                 return;
4573
4574         DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
4575                       transcoder_name(old_crtc_state->cpu_transcoder));
4576
4577         reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
4578         trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
4579                                     PORT_SYNC_MODE_MASTER_SELECT_MASK);
4580         I915_WRITE(reg, trans_ddi_func_ctl2_val);
4581 }
4582
/*
 * Switch both ends of the FDI link (CPU TX and PCH RX) from the
 * training pattern to the normal/idle pattern once link training
 * has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses its own train-none encoding. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	/* The RX side uses different pattern encodings on a CPT PCH. */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4623
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Trains the CPU-to-PCH FDI link for @crtc: drives training pattern 1
 * on both the CPU TX and PCH RX sides until bit lock is reported in
 * FDI_RX_IIR, then pattern 2 until symbol lock. Failures are only
 * logged; the caller proceeds regardless.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
                                    const struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 temp, tries;

        /* FDI needs bits from pipe first */
        assert_pipe_enabled(dev_priv, pipe);

        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);
        I915_READ(reg);
        udelay(150);

        /* enable CPU FDI TX and PCH FDI RX */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_DP_PORT_WIDTH_MASK;
        temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp | FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(150);

        /* Ironlake workaround, enable clock pointer after FDI enable*/
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
                   FDI_RX_PHASE_SYNC_POINTER_EN);

        /* Poll FDI_RX_IIR for bit lock; write it back (W1C) once seen. */
        reg = FDI_RX_IIR(pipe);
        for (tries = 0; tries < 5; tries++) {
                temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                if ((temp & FDI_RX_BIT_LOCK)) {
                        DRM_DEBUG_KMS("FDI train 1 done.\n");
                        I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                        break;
                }
        }
        if (tries == 5)
                DRM_ERROR("FDI train 1 fail!\n");

        /* Train 2 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* Poll for symbol lock to complete training. */
        reg = FDI_RX_IIR(pipe);
        for (tries = 0; tries < 5; tries++) {
                temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                if (temp & FDI_RX_SYMBOL_LOCK) {
                        I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                        DRM_DEBUG_KMS("FDI train 2 done.\n");
                        break;
                }
        }
        if (tries == 5)
                DRM_ERROR("FDI train 2 fail!\n");

        DRM_DEBUG_KMS("FDI train done\n");

}
4717
/*
 * Voltage-swing / pre-emphasis settings tried in order during SNB
 * (and IVB manual) FDI link training; each value is OR'ed into
 * FDI_TX_CTL after clearing FDI_LINK_TRAIN_VOL_EMP_MASK.
 */
static const int snb_b_fdi_train_param[] = {
        FDI_LINK_TRAIN_400MV_0DB_SNB_B,
        FDI_LINK_TRAIN_400MV_6DB_SNB_B,
        FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
        FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4724
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Like the ILK variant, but steps through each voltage-swing /
 * pre-emphasis level in snb_b_fdi_train_param[] (retrying the lock
 * check up to 5 times per level) for both training pattern 1 (bit
 * lock) and pattern 2 (symbol lock). CPT PCHs use the _CPT training
 * pattern fields on the RX side. Failures are only logged.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
                                const struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 temp, i, retry;

        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* enable CPU FDI TX and PCH FDI RX */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_DP_PORT_WIDTH_MASK;
        temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);

        I915_WRITE(FDI_RX_MISC(pipe),
                   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev_priv)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
        }
        I915_WRITE(reg, temp | FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(150);

        /* Walk the vswing/pre-emphasis table until bit lock is seen. */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                for (retry = 0; retry < 5; retry++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
                        if (temp & FDI_RX_BIT_LOCK) {
                                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                                DRM_DEBUG_KMS("FDI train 1 done.\n");
                                break;
                        }
                        udelay(50);
                }
                /* retry < 5 means we broke out on a lock — done. */
                if (retry < 5)
                        break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 1 fail!\n");

        /* Train 2 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        if (IS_GEN(dev_priv, 6)) {
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                /* SNB-B */
                temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        }
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev_priv)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_2;
        }
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* Same vswing walk, now waiting for symbol lock. */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                for (retry = 0; retry < 5; retry++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
                        if (temp & FDI_RX_SYMBOL_LOCK) {
                                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                                DRM_DEBUG_KMS("FDI train 2 done.\n");
                                break;
                        }
                        udelay(50);
                }
                if (retry < 5)
                        break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 2 fail!\n");

        DRM_DEBUG_KMS("FDI train done.\n");
}
4857
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Unlike the auto-train path, each vswing/pre-emphasis level from
 * snb_b_fdi_train_param[] is tried twice (j/2 indexes the table);
 * TX/RX are fully disabled and re-enabled between attempts. A
 * successful pattern-2 symbol lock jumps straight to train_done;
 * exhausting all levels falls through with only debug logging.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
                                      const struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 temp, i, j;

        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
                      I915_READ(FDI_RX_IIR(pipe)));

        /* Try each vswing and preemphasis setting twice before moving on */
        for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
                /* disable first in case we need to retry */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
                temp &= ~FDI_TX_ENABLE;
                I915_WRITE(reg, temp);

                reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_AUTO;
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp &= ~FDI_RX_ENABLE;
                I915_WRITE(reg, temp);

                /* enable CPU FDI TX and PCH FDI RX */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_DP_PORT_WIDTH_MASK;
                temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
                temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[j/2];
                temp |= FDI_COMPOSITE_SYNC;
                I915_WRITE(reg, temp | FDI_TX_ENABLE);

                I915_WRITE(FDI_RX_MISC(pipe),
                           FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

                reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
                temp |= FDI_COMPOSITE_SYNC;
                I915_WRITE(reg, temp | FDI_RX_ENABLE);

                POSTING_READ(reg);
                udelay(1); /* should be 0.5us */

                /* Poll briefly for bit lock (double-read to catch races). */
                for (i = 0; i < 4; i++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                        if (temp & FDI_RX_BIT_LOCK ||
                            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
                                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                                DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
                                              i);
                                break;
                        }
                        udelay(1); /* should be 0.5us */
                }
                if (i == 4) {
                        DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
                        continue;
                }

                /* Train 2 */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_NONE_IVB;
                temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
                I915_WRITE(reg, temp);

                reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(2); /* should be 1.5us */

                /* Poll for symbol lock; success ends training entirely. */
                for (i = 0; i < 4; i++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                        if (temp & FDI_RX_SYMBOL_LOCK ||
                            (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
                                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                                DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
                                              i);
                                goto train_done;
                        }
                        udelay(2); /* should be 1.5us */
                }
                if (i == 4)
                        DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
        }

train_done:
        DRM_DEBUG_KMS("FDI train done.\n");
}
4977
/*
 * Enable the FDI PLLs for the crtc: first the PCH FDI RX PLL (with the
 * link's lane count and the pipe's BPC programmed in), then switch the
 * RX side from Rawclk to PCDclk, and finally the CPU FDI TX PLL if it
 * is not already running. Each step waits for hardware warmup.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        enum pipe pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;

        /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
        temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
        /* BPC in FDI rx must match the pipe's PIPECONF setting */
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

        POSTING_READ(reg);
        udelay(200);

        /* Switch from Rawclk to PCDclk */
        temp = I915_READ(reg);
        I915_WRITE(reg, temp | FDI_PCDCLK);

        POSTING_READ(reg);
        udelay(200);

        /* Enable CPU FDI TX PLL, always on for Ironlake */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        if ((temp & FDI_TX_PLL_ENABLE) == 0) {
                I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

                POSTING_READ(reg);
                udelay(100);
        }
}
5014
/*
 * Disable the FDI PLLs for the crtc, undoing ironlake_fdi_pll_enable()
 * in reverse order: switch RX back to Rawclk, stop the CPU TX PLL,
 * then stop the PCH RX PLL, waiting for the clocks to settle.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;

        /* Switch from PCDclk to Rawclk */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_PCDCLK);

        /* Disable CPU FDI TX PLL */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

        POSTING_READ(reg);
        udelay(100);

        /* Disable PCH FDI RX PLL */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

        /* Wait for the clocks to turn off. */
        POSTING_READ(reg);
        udelay(100);
}
5044
/*
 * Disable the FDI link on @crtc: turn off CPU FDI TX and PCH FDI RX,
 * apply the IBX clock-pointer workaround, and leave both sides parked
 * in training pattern 1 with the RX BPC kept in sync with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;

        /* disable CPU FDI tx and PCH FDI rx */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
        POSTING_READ(reg);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(0x7 << 16);
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(100);

        /* Ironlake workaround, disable clock pointer after downing FDI */
        if (HAS_PCH_IBX(dev_priv))
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

        /* still set train pattern 1 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev_priv)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
        }
        /* BPC in FDI rx is consistent with that in PIPECONF */
        temp &= ~(0x07 << 16);
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(100);
}
5097
5098 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5099 {
5100         struct drm_crtc *crtc;
5101         bool cleanup_done;
5102
5103         drm_for_each_crtc(crtc, &dev_priv->drm) {
5104                 struct drm_crtc_commit *commit;
5105                 spin_lock(&crtc->commit_lock);
5106                 commit = list_first_entry_or_null(&crtc->commit_list,
5107                                                   struct drm_crtc_commit, commit_entry);
5108                 cleanup_done = commit ?
5109                         try_wait_for_completion(&commit->cleanup_done) : true;
5110                 spin_unlock(&crtc->commit_lock);
5111
5112                 if (cleanup_done)
5113                         continue;
5114
5115                 drm_crtc_wait_one_vblank(crtc);
5116
5117                 return true;
5118         }
5119
5120         return false;
5121 }
5122
/*
 * Disable the LPT iCLKIP clock: gate the pixel clock, then set the
 * disable bit in SBI_SSCCTL6 over the sideband interface (under
 * sb_lock, as all SBI accesses must be).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
        u32 temp;

        I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

        mutex_lock(&dev_priv->sb_lock);

        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp |= SBI_SSCCTL_DISABLE;
        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
5137
/*
 * Program iCLKIP clock to the desired frequency.
 *
 * Computes the integer divisor (divsel), phase increment (phaseinc)
 * and auxiliary divider (auxdiv) that approximate the crtc pixel
 * clock from the 172.8 MHz iCLK virtual root, writes them through the
 * sideband interface, re-enables the SSC modulator, and ungates the
 * pixel clock. Inverse of lpt_get_iclkip().
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int clock = crtc_state->hw.adjusted_mode.crtc_clock;
        u32 divsel, phaseinc, auxdiv, phasedir = 0;
        u32 temp;

        lpt_disable_iclkip(dev_priv);

        /* The iCLK virtual clock root frequency is in MHz,
         * but the adjusted_mode->crtc_clock is in KHz. To get the
         * divisors, it is necessary to divide one by another, so we
         * convert the virtual clock precision to KHz here for higher
         * precision.
         */
        for (auxdiv = 0; auxdiv < 2; auxdiv++) {
                u32 iclk_virtual_root_freq = 172800 * 1000;
                u32 iclk_pi_range = 64;
                u32 desired_divisor;

                desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                                    clock << auxdiv);
                divsel = (desired_divisor / iclk_pi_range) - 2;
                phaseinc = desired_divisor % iclk_pi_range;

                /*
                 * Near 20MHz is a corner case which is
                 * out of range for the 7-bit divisor
                 */
                if (divsel <= 0x7f)
                        break;
        }

        /* This should not happen with any sane values */
        WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
                ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
        WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
                ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

        DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
                        clock,
                        auxdiv,
                        divsel,
                        phasedir,
                        phaseinc);

        mutex_lock(&dev_priv->sb_lock);

        /* Program SSCDIVINTPHASE6 */
        temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
        temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
        temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
        temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
        temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
        temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

        /* Program SSCAUXDIV */
        temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
        temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
        intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

        /* Enable modulator and associated divider */
        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp &= ~SBI_SSCCTL_DISABLE;
        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);

        /* Wait for initialization time */
        udelay(24);

        I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5216
/*
 * Read back the currently programmed iCLKIP frequency in KHz, the
 * inverse of lpt_program_iclkip(). Returns 0 if the pixel clock is
 * gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
        u32 divsel, phaseinc, auxdiv;
        u32 iclk_virtual_root_freq = 172800 * 1000;
        u32 iclk_pi_range = 64;
        u32 desired_divisor;
        u32 temp;

        if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
                return 0;

        mutex_lock(&dev_priv->sb_lock);

        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        if (temp & SBI_SSCCTL_DISABLE) {
                mutex_unlock(&dev_priv->sb_lock);
                return 0;
        }

        temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
                SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
        phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
                SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

        temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
                SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

        mutex_unlock(&dev_priv->sb_lock);

        /* Reverse of the divsel/phaseinc split done when programming. */
        desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

        return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                 desired_divisor << auxdiv);
}
5253
/*
 * Copy the CPU transcoder's H/V timing registers into the given PCH
 * transcoder so both sides of the FDI link agree on the mode timings.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
                                                enum pipe pch_transcoder)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
                   I915_READ(HTOTAL(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
                   I915_READ(HBLANK(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
                   I915_READ(HSYNC(cpu_transcoder)));

        I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
                   I915_READ(VTOTAL(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
                   I915_READ(VBLANK(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
                   I915_READ(VSYNC(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
                   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
5277
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * No-op if the bit already matches @enable; warns if either FDI B or
 * FDI C RX is still enabled, since the bit must not change while the
 * shared lanes are in use.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
        u32 temp;

        temp = I915_READ(SOUTH_CHICKEN1);
        if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
                return;

        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

        temp &= ~FDI_BC_BIFURCATION_SELECT;
        if (enable)
                temp |= FDI_BC_BIFURCATION_SELECT;

        DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
        I915_WRITE(SOUTH_CHICKEN1, temp);
        POSTING_READ(SOUTH_CHICKEN1);
}
5297
5298 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5299 {
5300         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5301         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5302
5303         switch (crtc->pipe) {
5304         case PIPE_A:
5305                 break;
5306         case PIPE_B:
5307                 if (crtc_state->fdi_lanes > 2)
5308                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5309                 else
5310                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5311
5312                 break;
5313         case PIPE_C:
5314                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5315
5316                 break;
5317         default:
5318                 BUG();
5319         }
5320 }
5321
5322 /*
5323  * Finds the encoder associated with the given CRTC. This can only be
5324  * used when we know that the CRTC isn't feeding multiple encoders!
5325  */
5326 static struct intel_encoder *
5327 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5328                            const struct intel_crtc_state *crtc_state)
5329 {
5330         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5331         const struct drm_connector_state *connector_state;
5332         const struct drm_connector *connector;
5333         struct intel_encoder *encoder = NULL;
5334         int num_encoders = 0;
5335         int i;
5336
5337         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5338                 if (connector_state->crtc != &crtc->base)
5339                         continue;
5340
5341                 encoder = to_intel_encoder(connector_state->best_encoder);
5342                 num_encoders++;
5343         }
5344
5345         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5346              num_encoders, pipe_name(crtc->pipe));
5347
5348         return encoder;
5349 }
5350
5351 /*
5352  * Enable PCH resources required for PCH ports:
5353  *   - PCH PLLs
5354  *   - FDI training & RX/TX
5355  *   - update transcoder timings
5356  *   - DP transcoding bits
5357  *   - transcoder
5358  */
5359 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5360                                 const struct intel_crtc_state *crtc_state)
5361 {
5362         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5363         struct drm_device *dev = crtc->base.dev;
5364         struct drm_i915_private *dev_priv = to_i915(dev);
5365         enum pipe pipe = crtc->pipe;
5366         u32 temp;
5367
5368         assert_pch_transcoder_disabled(dev_priv, pipe);
5369
5370         if (IS_IVYBRIDGE(dev_priv))
5371                 ivybridge_update_fdi_bc_bifurcation(crtc_state);
5372
5373         /* Write the TU size bits before fdi link training, so that error
5374          * detection works. */
5375         I915_WRITE(FDI_RX_TUSIZE1(pipe),
5376                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5377
5378         /* For PCH output, training FDI link */
5379         dev_priv->display.fdi_link_train(crtc, crtc_state);
5380
5381         /* We need to program the right clock selection before writing the pixel
5382          * mutliplier into the DPLL. */
5383         if (HAS_PCH_CPT(dev_priv)) {
5384                 u32 sel;
5385
5386                 temp = I915_READ(PCH_DPLL_SEL);
5387                 temp |= TRANS_DPLL_ENABLE(pipe);
5388                 sel = TRANS_DPLLB_SEL(pipe);
5389                 if (crtc_state->shared_dpll ==
5390                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5391                         temp |= sel;
5392                 else
5393                         temp &= ~sel;
5394                 I915_WRITE(PCH_DPLL_SEL, temp);
5395         }
5396
5397         /* XXX: pch pll's can be enabled any time before we enable the PCH
5398          * transcoder, and we actually should do this to not upset any PCH
5399          * transcoder that already use the clock when we share it.
5400          *
5401          * Note that enable_shared_dpll tries to do the right thing, but
5402          * get_shared_dpll unconditionally resets the pll - we need that to have
5403          * the right LVDS enable sequence. */
5404         intel_enable_shared_dpll(crtc_state);
5405
5406         /* set transcoder timing, panel must allow it */
5407         assert_panel_unlocked(dev_priv, pipe);
5408         ironlake_pch_transcoder_set_timings(crtc_state, pipe);
5409
5410         intel_fdi_normal_train(crtc);
5411
5412         /* For PCH DP, enable TRANS_DP_CTL */
5413         if (HAS_PCH_CPT(dev_priv) &&
5414             intel_crtc_has_dp_encoder(crtc_state)) {
5415                 const struct drm_display_mode *adjusted_mode =
5416                         &crtc_state->hw.adjusted_mode;
5417                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5418                 i915_reg_t reg = TRANS_DP_CTL(pipe);
5419                 enum port port;
5420
5421                 temp = I915_READ(reg);
5422                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5423                           TRANS_DP_SYNC_MASK |
5424                           TRANS_DP_BPC_MASK);
5425                 temp |= TRANS_DP_OUTPUT_ENABLE;
5426                 temp |= bpc << 9; /* same format but at 11:9 */
5427
5428                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5429                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5430                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5431                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5432
5433                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5434                 WARN_ON(port < PORT_B || port > PORT_D);
5435                 temp |= TRANS_DP_PORT_SEL(port);
5436
5437                 I915_WRITE(reg, temp);
5438         }
5439
5440         ironlake_enable_pch_transcoder(crtc_state);
5441 }
5442
/*
 * Enable the LPT PCH transcoder for the given crtc: program the iCLKIP
 * clock, copy the CPU transcoder timings over, and turn the transcoder on.
 * The PCH transcoder state is tracked under the PIPE_A slot here (the
 * code always uses PIPE_A for LPT).
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
                           const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* Must currently be off; we are about to program it from scratch. */
        assert_pch_transcoder_disabled(dev_priv, PIPE_A);

        lpt_program_iclkip(crtc_state);

        /* Set transcoder timing. */
        ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5459
/*
 * Sanity check after a CPT modeset: verify the pipe is actually running
 * by watching its scanline counter (PIPEDSL) move off a sampled value.
 * The wait is retried once before declaring the pipe stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t dslreg = PIPEDSL(pipe);
        u32 temp;

        temp = I915_READ(dslreg);
        udelay(500);
        /* Scanline should advance past the sampled value; retry the wait once. */
        if (wait_for(I915_READ(dslreg) != temp, 5)) {
                if (wait_for(I915_READ(dslreg) != temp, 5))
                        DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
        }
}
5473
5474 /*
5475  * The hardware phase 0.0 refers to the center of the pixel.
5476  * We want to start from the top/left edge which is phase
5477  * -0.5. That matches how the hardware calculates the scaling
5478  * factors (from top-left of the first pixel to bottom-right
5479  * of the last pixel, as opposed to the pixel centers).
5480  *
5481  * For 4:2:0 subsampled chroma planes we obviously have to
5482  * adjust that so that the chroma sample position lands in
5483  * the right spot.
5484  *
5485  * Note that for packed YCbCr 4:2:2 formats there is no way to
5486  * control chroma siting. The hardware simply replicates the
5487  * chroma samples for both of the luma samples, and thus we don't
5488  * actually get the expected MPEG2 chroma siting convention :(
5489  * The same behaviour is observed on pre-SKL platforms as well.
5490  *
5491  * Theory behind the formula (note that we ignore sub-pixel
5492  * source coordinates):
5493  * s = source sample position
5494  * d = destination sample position
5495  *
5496  * Downscaling 4:1:
5497  * -0.5
5498  * | 0.0
5499  * | |     1.5 (initial phase)
5500  * | |     |
5501  * v v     v
5502  * | s | s | s | s |
5503  * |       d       |
5504  *
5505  * Upscaling 1:4:
5506  * -0.5
5507  * | -0.375 (initial phase)
5508  * | |     0.0
5509  * | |     |
5510  * v v     v
5511  * |       s       |
5512  * | d | d | d | d |
5513  */
5514 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5515 {
5516         int phase = -0x8000;
5517         u16 trip = 0;
5518
5519         if (chroma_cosited)
5520                 phase += (sub - 1) * 0x8000 / sub;
5521
5522         phase += scale / (2 * sub);
5523
5524         /*
5525          * Hardware initial phase limited to [-0.5:1.5].
5526          * Since the max hardware scale factor is 3.0, we
5527          * should never actually excdeed 1.0 here.
5528          */
5529         WARN_ON(phase < -0x8000 || phase > 0x18000);
5530
5531         if (phase < 0)
5532                 phase = 0x10000 + phase;
5533         else
5534                 trip = PS_PHASE_TRIP;
5535
5536         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5537 }
5538
/* Pipe/plane scaler source and destination size limits, in pixels (SKL+). */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
/* ICL (gen11+) scalers support wider sources/destinations. */
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
/* Semiplanar YUV 4:2:0 sources need at least 16x16. */
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
5553
/*
 * Stage an update to the crtc's scaler state for one scaler user (the
 * pipe itself or a plane).  Validates the requested scaling against the
 * hardware limits and marks/clears the user in crtc_state->scaler_state.
 * Only the software state is touched here; the actual register
 * programming happens later during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL if the requested scaling cannot be
 * supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  const struct drm_format_info *format, bool need_scaler)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        if (src_w != dst_w || src_h != dst_h)
                need_scaler = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
            need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler bound to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaler) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* Semiplanar YUV has a larger minimum source size. */
        if (format && drm_format_info_is_yuv_semiplanar(format) &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks (gen11+ allows wider sources/destinations) */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (INTEL_GEN(dev_priv) >= 11 &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (INTEL_GEN(dev_priv) < 11 &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                scaler_state->scaler_users);

        return 0;
}
5642
5643 /**
5644  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5645  *
5646  * @state: crtc's scaler state
5647  *
5648  * Return
5649  *     0 - scaler_usage updated successfully
5650  *    error - requested scaling cannot be supported or other error condition
5651  */
5652 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5653 {
5654         const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
5655         bool need_scaler = false;
5656
5657         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5658                 need_scaler = true;
5659
5660         return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
5661                                  &state->scaler_state.scaler_id,
5662                                  state->pipe_src_w, state->pipe_src_h,
5663                                  adjusted_mode->crtc_hdisplay,
5664                                  adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5665 }
5666
5667 /**
5668  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5669  * @crtc_state: crtc's scaler state
5670  * @plane_state: atomic plane state to update
5671  *
5672  * Return
5673  *     0 - scaler_usage updated successfully
5674  *    error - requested scaling cannot be supported or other error condition
5675  */
5676 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5677                                    struct intel_plane_state *plane_state)
5678 {
5679         struct intel_plane *intel_plane =
5680                 to_intel_plane(plane_state->uapi.plane);
5681         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5682         struct drm_framebuffer *fb = plane_state->hw.fb;
5683         int ret;
5684         bool force_detach = !fb || !plane_state->uapi.visible;
5685         bool need_scaler = false;
5686
5687         /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5688         if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5689             fb && drm_format_info_is_yuv_semiplanar(fb->format))
5690                 need_scaler = true;
5691
5692         ret = skl_update_scaler(crtc_state, force_detach,
5693                                 drm_plane_index(&intel_plane->base),
5694                                 &plane_state->scaler_id,
5695                                 drm_rect_width(&plane_state->uapi.src) >> 16,
5696                                 drm_rect_height(&plane_state->uapi.src) >> 16,
5697                                 drm_rect_width(&plane_state->uapi.dst),
5698                                 drm_rect_height(&plane_state->uapi.dst),
5699                                 fb ? fb->format : NULL, need_scaler);
5700
5701         if (ret || plane_state->scaler_id < 0)
5702                 return ret;
5703
5704         /* check colorkey */
5705         if (plane_state->ckey.flags) {
5706                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5707                               intel_plane->base.base.id,
5708                               intel_plane->base.name);
5709                 return -EINVAL;
5710         }
5711
5712         /* Check src format */
5713         switch (fb->format->format) {
5714         case DRM_FORMAT_RGB565:
5715         case DRM_FORMAT_XBGR8888:
5716         case DRM_FORMAT_XRGB8888:
5717         case DRM_FORMAT_ABGR8888:
5718         case DRM_FORMAT_ARGB8888:
5719         case DRM_FORMAT_XRGB2101010:
5720         case DRM_FORMAT_XBGR2101010:
5721         case DRM_FORMAT_ARGB2101010:
5722         case DRM_FORMAT_ABGR2101010:
5723         case DRM_FORMAT_YUYV:
5724         case DRM_FORMAT_YVYU:
5725         case DRM_FORMAT_UYVY:
5726         case DRM_FORMAT_VYUY:
5727         case DRM_FORMAT_NV12:
5728         case DRM_FORMAT_P010:
5729         case DRM_FORMAT_P012:
5730         case DRM_FORMAT_P016:
5731         case DRM_FORMAT_Y210:
5732         case DRM_FORMAT_Y212:
5733         case DRM_FORMAT_Y216:
5734         case DRM_FORMAT_XVYU2101010:
5735         case DRM_FORMAT_XVYU12_16161616:
5736         case DRM_FORMAT_XVYU16161616:
5737                 break;
5738         case DRM_FORMAT_XBGR16161616F:
5739         case DRM_FORMAT_ABGR16161616F:
5740         case DRM_FORMAT_XRGB16161616F:
5741         case DRM_FORMAT_ARGB16161616F:
5742                 if (INTEL_GEN(dev_priv) >= 11)
5743                         break;
5744                 /* fall through */
5745         default:
5746                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5747                               intel_plane->base.base.id, intel_plane->base.name,
5748                               fb->base.id, fb->format->format);
5749                 return -EINVAL;
5750         }
5751
5752         return 0;
5753 }
5754
5755 static void skylake_scaler_disable(struct intel_crtc *crtc)
5756 {
5757         int i;
5758
5759         for (i = 0; i < crtc->num_scalers; i++)
5760                 skl_detach_scaler(crtc, i);
5761 }
5762
/*
 * Enable the SKL+ panel fitter, which is implemented on top of a pipe
 * scaler: program the scaler reserved for pipe scaling
 * (scaler_state->scaler_id) with the precomputed pch_pfit window.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;

        if (crtc_state->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int pfit_w, pfit_h, hscale, vscale;
                int id;

                /* Pipe scaling requires a scaler to have been assigned. */
                if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
                        return;

                /* pch_pfit.size packs width in bits 31:16, height in 15:0. */
                pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
                pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

                /* Scale factors in .16 fixed point. */
                hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
                vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

                uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

                id = scaler_state->scaler_id;
                /*
                 * NOTE(review): the phase registers use I915_WRITE_FW while
                 * the rest use I915_WRITE - confirm this mix is intentional.
                 */
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
        }
}
5799
/*
 * Enable the ILK-style panel fitter for this pipe, using the window
 * position/size precomputed in crtc_state->pch_pfit.
 */
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (crtc_state->pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
                /* IVB/HSW additionally select the pipe in PF_CTL itself. */
                if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
                                                 PF_PIPE_SEL_IVB(pipe));
                else
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
                I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
        }
}
5820
/*
 * Enable IPS if the crtc state asks for it.  On BDW the enable goes
 * through the pcode mailbox; on HSW it is done via the IPS_CTL register
 * directly.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                /* Quoting Art Runyan: "it's not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
5856
/*
 * Disable IPS if the crtc state had it enabled.  Mirrors hsw_enable_ips():
 * pcode mailbox on BDW, IPS_CTL register on HSW.  Waits a vblank at the
 * end so the plane can be safely disabled afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5883
5884 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5885 {
5886         if (intel_crtc->overlay)
5887                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5888
5889         /* Let userspace switch the overlay on again. In most cases userspace
5890          * has to recompute where to put it anyway.
5891          */
5892 }
5893
5894 /**
5895  * intel_post_enable_primary - Perform operations after enabling primary plane
5896  * @crtc: the CRTC whose primary plane was just enabled
5897  * @new_crtc_state: the enabling state
5898  *
5899  * Performs potentially sleeping operations that must be done after the primary
5900  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5901  * called due to an explicit primary plane update, or due to an implicit
5902  * re-enable that is caused when a sprite plane is updated to no longer
5903  * completely hide the primary plane.
5904  */
5905 static void
5906 intel_post_enable_primary(struct drm_crtc *crtc,
5907                           const struct intel_crtc_state *new_crtc_state)
5908 {
5909         struct drm_device *dev = crtc->dev;
5910         struct drm_i915_private *dev_priv = to_i915(dev);
5911         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5912         enum pipe pipe = intel_crtc->pipe;
5913
5914         /*
5915          * Gen2 reports pipe underruns whenever all planes are disabled.
5916          * So don't enable underrun reporting before at least some planes
5917          * are enabled.
5918          * FIXME: Need to fix the logic to work when we turn off all planes
5919          * but leave the pipe running.
5920          */
5921         if (IS_GEN(dev_priv, 2))
5922                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5923
5924         /* Underruns don't always raise interrupts, so check manually. */
5925         intel_check_cpu_fifo_underruns(dev_priv);
5926         intel_check_pch_fifo_underruns(dev_priv);
5927 }
5928
/*
 * Non-atomic counterpart of the pre-plane-update work: disable underrun
 * reporting (gen2), IPS and memory self-refresh before the primary plane
 * gets turned off.
 */
/* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        hsw_disable_ips(to_intel_crtc_state(crtc->state));

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
}
5960
5961 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5962                                        const struct intel_crtc_state *new_crtc_state)
5963 {
5964         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5965         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5966
5967         if (!old_crtc_state->ips_enabled)
5968                 return false;
5969
5970         if (needs_modeset(new_crtc_state))
5971                 return true;
5972
5973         /*
5974          * Workaround : Do not read or write the pipe palette/gamma data while
5975          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5976          *
5977          * Disable IPS before we program the LUT.
5978          */
5979         if (IS_HASWELL(dev_priv) &&
5980             (new_crtc_state->uapi.color_mgmt_changed ||
5981              new_crtc_state->update_pipe) &&
5982             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5983                 return true;
5984
5985         return !new_crtc_state->ips_enabled;
5986 }
5987
5988 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5989                                        const struct intel_crtc_state *new_crtc_state)
5990 {
5991         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5992         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5993
5994         if (!new_crtc_state->ips_enabled)
5995                 return false;
5996
5997         if (needs_modeset(new_crtc_state))
5998                 return true;
5999
6000         /*
6001          * Workaround : Do not read or write the pipe palette/gamma data while
6002          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6003          *
6004          * Re-enable IPS after the LUT has been programmed.
6005          */
6006         if (IS_HASWELL(dev_priv) &&
6007             (new_crtc_state->uapi.color_mgmt_changed ||
6008              new_crtc_state->update_pipe) &&
6009             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6010                 return true;
6011
6012         /*
6013          * We can't read out IPS on broadwell, assume the worst and
6014          * forcibly enable IPS on the first fastset.
6015          */
6016         if (new_crtc_state->update_pipe &&
6017             old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
6018                 return true;
6019
6020         return !old_crtc_state->ips_enabled;
6021 }
6022
6023 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
6024                           const struct intel_crtc_state *crtc_state)
6025 {
6026         if (!crtc_state->nv12_planes)
6027                 return false;
6028
6029         /* WA Display #0827: Gen9:all */
6030         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6031                 return true;
6032
6033         return false;
6034 }
6035
6036 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
6037                                const struct intel_crtc_state *crtc_state)
6038 {
6039         /* Wa_2006604312:icl */
6040         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
6041                 return true;
6042
6043         return false;
6044 }
6045
/*
 * Post-plane-update housekeeping: flush frontbuffer tracking, update
 * watermarks, re-enable IPS, run FBC/primary-plane follow-ups and disarm
 * the display workarounds that are no longer needed.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        struct intel_crtc_state *pipe_config =
                intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
                                                crtc);
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(state, primary);

        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

        if (pipe_config->update_wm_post && pipe_config->hw.active)
                intel_update_watermarks(crtc);

        /* IPS may only be re-enabled after the plane update + vblank. */
        if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
                hsw_enable_ips(pipe_config);

        if (old_primary_state) {
                struct drm_plane_state *new_primary_state =
                        drm_atomic_get_new_plane_state(state, primary);

                intel_fbc_post_update(crtc);

                /* Primary went from hidden to visible (or full modeset). */
                if (new_primary_state->visible &&
                    (needs_modeset(pipe_config) ||
                     !old_primary_state->visible))
                        intel_post_enable_primary(&crtc->base, pipe_config);
        }

        /* Display WA #0827: disarm once no longer required. */
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config))
                skl_wa_827(dev_priv, crtc->pipe, false);

        /* Wa_2006604312:icl: disarm once no longer required. */
        if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
            !needs_scalerclk_wa(dev_priv, pipe_config))
                icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
}
6087
/*
 * Pre-plane-update housekeeping: disable IPS when required, prepare FBC,
 * arm display workarounds, disable self-refresh / LP watermarks where
 * needed, and program intermediate watermarks for non-modeset updates.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
                                   struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(state, primary);
        bool modeset = needs_modeset(pipe_config);
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(state);

        if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
                hsw_disable_ips(old_crtc_state);

        if (old_primary_state) {
                struct intel_plane_state *new_primary_state =
                        intel_atomic_get_new_plane_state(intel_state,
                                                         to_intel_plane(primary));

                intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So disable underrun reporting before all the planes get disabled.
                 */
                if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
                    (modeset || !new_primary_state->uapi.visible))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
        }

        /* Display WA 827 */
        if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
            needs_nv12_wa(dev_priv, pipe_config))
                skl_wa_827(dev_priv, crtc->pipe, true);

        /* Wa_2006604312:icl */
        if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
            needs_scalerclk_wa(dev_priv, pipe_config))
                icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
            pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
            old_crtc_state->hw.active)
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * If we're doing a modeset, we're done.  No need to do any pre-vblank
         * watermark programming here.
         */
        if (needs_modeset(pipe_config))
                return;

        /*
         * For platforms that support atomic watermarks, program the
         * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
         * will be the intermediate values that are safe for both pre- and
         * post- vblank; when vblank happens, the 'active' values will be set
         * to the final 'target' values and we'll do this again to get the
         * optimal watermarks.  For gen9+ platforms, the values we program here
         * will be the final target values which will get automatically latched
         * at vblank time; no further programming will be necessary.
         *
         * If a platform hasn't been transitioned to atomic watermarks yet,
         * we'll continue to update watermarks the old way, if flags tell
         * us to.
         */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(intel_state,
                                                     pipe_config);
        else if (pipe_config->update_wm_pre)
                intel_update_watermarks(crtc);
}
6181
6182 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6183                                       struct intel_crtc *crtc)
6184 {
6185         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6186         const struct intel_crtc_state *new_crtc_state =
6187                 intel_atomic_get_new_crtc_state(state, crtc);
6188         unsigned int update_mask = new_crtc_state->update_planes;
6189         const struct intel_plane_state *old_plane_state;
6190         struct intel_plane *plane;
6191         unsigned fb_bits = 0;
6192         int i;
6193
6194         intel_crtc_dpms_overlay_disable(crtc);
6195
6196         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6197                 if (crtc->pipe != plane->pipe ||
6198                     !(update_mask & BIT(plane->id)))
6199                         continue;
6200
6201                 intel_disable_plane(plane, new_crtc_state);
6202
6203                 if (old_plane_state->uapi.visible)
6204                         fb_bits |= plane->frontbuffer_bit;
6205         }
6206
6207         intel_frontbuffer_flip(dev_priv, fb_bits);
6208 }
6209
6210 /*
6211  * intel_connector_primary_encoder - get the primary encoder for a connector
6212  * @connector: connector for which to return the encoder
6213  *
6214  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6215  * all connectors to their encoder, except for DP-MST connectors which have
6216  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6217  * pointed to by as many DP-MST connectors as there are pipes.
6218  */
6219 static struct intel_encoder *
6220 intel_connector_primary_encoder(struct intel_connector *connector)
6221 {
6222         struct intel_encoder *encoder;
6223
6224         if (connector->mst_port)
6225                 return &dp_to_dig_port(connector->mst_port)->base;
6226
6227         encoder = intel_attached_encoder(&connector->base);
6228         WARN_ON(!encoder);
6229
6230         return encoder;
6231 }
6232
6233 static bool
6234 intel_connector_needs_modeset(struct intel_atomic_state *state,
6235                               const struct drm_connector_state *old_conn_state,
6236                               const struct drm_connector_state *new_conn_state)
6237 {
6238         struct intel_crtc *old_crtc = old_conn_state->crtc ?
6239                                       to_intel_crtc(old_conn_state->crtc) : NULL;
6240         struct intel_crtc *new_crtc = new_conn_state->crtc ?
6241                                       to_intel_crtc(new_conn_state->crtc) : NULL;
6242
6243         return new_crtc != old_crtc ||
6244                (new_crtc &&
6245                 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
6246 }
6247
6248 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6249 {
6250         struct drm_connector_state *old_conn_state;
6251         struct drm_connector_state *new_conn_state;
6252         struct drm_connector *conn;
6253         int i;
6254
6255         for_each_oldnew_connector_in_state(&state->base, conn,
6256                                            old_conn_state, new_conn_state, i) {
6257                 struct intel_encoder *encoder;
6258                 struct intel_crtc *crtc;
6259
6260                 if (!intel_connector_needs_modeset(state,
6261                                                    old_conn_state,
6262                                                    new_conn_state))
6263                         continue;
6264
6265                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6266                 if (!encoder->update_prepare)
6267                         continue;
6268
6269                 crtc = new_conn_state->crtc ?
6270                         to_intel_crtc(new_conn_state->crtc) : NULL;
6271                 encoder->update_prepare(state, encoder, crtc);
6272         }
6273 }
6274
6275 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6276 {
6277         struct drm_connector_state *old_conn_state;
6278         struct drm_connector_state *new_conn_state;
6279         struct drm_connector *conn;
6280         int i;
6281
6282         for_each_oldnew_connector_in_state(&state->base, conn,
6283                                            old_conn_state, new_conn_state, i) {
6284                 struct intel_encoder *encoder;
6285                 struct intel_crtc *crtc;
6286
6287                 if (!intel_connector_needs_modeset(state,
6288                                                    old_conn_state,
6289                                                    new_conn_state))
6290                         continue;
6291
6292                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6293                 if (!encoder->update_complete)
6294                         continue;
6295
6296                 crtc = new_conn_state->crtc ?
6297                         to_intel_crtc(new_conn_state->crtc) : NULL;
6298                 encoder->update_complete(state, encoder, crtc);
6299         }
6300 }
6301
6302 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
6303                                           struct intel_crtc_state *crtc_state,
6304                                           struct intel_atomic_state *state)
6305 {
6306         struct drm_connector_state *conn_state;
6307         struct drm_connector *conn;
6308         int i;
6309
6310         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6311                 struct intel_encoder *encoder =
6312                         to_intel_encoder(conn_state->best_encoder);
6313
6314                 if (conn_state->crtc != &crtc->base)
6315                         continue;
6316
6317                 if (encoder->pre_pll_enable)
6318                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6319         }
6320 }
6321
6322 static void intel_encoders_pre_enable(struct intel_crtc *crtc,
6323                                       struct intel_crtc_state *crtc_state,
6324                                       struct intel_atomic_state *state)
6325 {
6326         struct drm_connector_state *conn_state;
6327         struct drm_connector *conn;
6328         int i;
6329
6330         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6331                 struct intel_encoder *encoder =
6332                         to_intel_encoder(conn_state->best_encoder);
6333
6334                 if (conn_state->crtc != &crtc->base)
6335                         continue;
6336
6337                 if (encoder->pre_enable)
6338                         encoder->pre_enable(encoder, crtc_state, conn_state);
6339         }
6340 }
6341
6342 static void intel_encoders_enable(struct intel_crtc *crtc,
6343                                   struct intel_crtc_state *crtc_state,
6344                                   struct intel_atomic_state *state)
6345 {
6346         struct drm_connector_state *conn_state;
6347         struct drm_connector *conn;
6348         int i;
6349
6350         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6351                 struct intel_encoder *encoder =
6352                         to_intel_encoder(conn_state->best_encoder);
6353
6354                 if (conn_state->crtc != &crtc->base)
6355                         continue;
6356
6357                 if (encoder->enable)
6358                         encoder->enable(encoder, crtc_state, conn_state);
6359                 intel_opregion_notify_encoder(encoder, true);
6360         }
6361 }
6362
6363 static void intel_encoders_disable(struct intel_crtc *crtc,
6364                                    struct intel_crtc_state *old_crtc_state,
6365                                    struct intel_atomic_state *state)
6366 {
6367         struct drm_connector_state *old_conn_state;
6368         struct drm_connector *conn;
6369         int i;
6370
6371         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6372                 struct intel_encoder *encoder =
6373                         to_intel_encoder(old_conn_state->best_encoder);
6374
6375                 if (old_conn_state->crtc != &crtc->base)
6376                         continue;
6377
6378                 intel_opregion_notify_encoder(encoder, false);
6379                 if (encoder->disable)
6380                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6381         }
6382 }
6383
6384 static void intel_encoders_post_disable(struct intel_crtc *crtc,
6385                                         struct intel_crtc_state *old_crtc_state,
6386                                         struct intel_atomic_state *state)
6387 {
6388         struct drm_connector_state *old_conn_state;
6389         struct drm_connector *conn;
6390         int i;
6391
6392         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6393                 struct intel_encoder *encoder =
6394                         to_intel_encoder(old_conn_state->best_encoder);
6395
6396                 if (old_conn_state->crtc != &crtc->base)
6397                         continue;
6398
6399                 if (encoder->post_disable)
6400                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6401         }
6402 }
6403
6404 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
6405                                             struct intel_crtc_state *old_crtc_state,
6406                                             struct intel_atomic_state *state)
6407 {
6408         struct drm_connector_state *old_conn_state;
6409         struct drm_connector *conn;
6410         int i;
6411
6412         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6413                 struct intel_encoder *encoder =
6414                         to_intel_encoder(old_conn_state->best_encoder);
6415
6416                 if (old_conn_state->crtc != &crtc->base)
6417                         continue;
6418
6419                 if (encoder->post_pll_disable)
6420                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6421         }
6422 }
6423
6424 static void intel_encoders_update_pipe(struct intel_crtc *crtc,
6425                                        struct intel_crtc_state *crtc_state,
6426                                        struct intel_atomic_state *state)
6427 {
6428         struct drm_connector_state *conn_state;
6429         struct drm_connector *conn;
6430         int i;
6431
6432         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6433                 struct intel_encoder *encoder =
6434                         to_intel_encoder(conn_state->best_encoder);
6435
6436                 if (conn_state->crtc != &crtc->base)
6437                         continue;
6438
6439                 if (encoder->update_pipe)
6440                         encoder->update_pipe(encoder, crtc_state, conn_state);
6441         }
6442 }
6443
6444 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6445 {
6446         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6447         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6448
6449         plane->disable_plane(plane, crtc_state);
6450 }
6451
/*
 * Modeset enable sequence for ILK-style (PCH) platforms: program pipe
 * timings/M-N/pipeconf, bring up FDI/PCH as needed, load LUTs, enable
 * the pipe and finally the encoders. The ordering of these steps is
 * mandated by the hardware; do not reorder.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (pipe_config->has_pch_encoder)
		intel_prepare_shared_dpll(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* FDI link M/N values only apply when a PCH encoder is in use. */
	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_encoders_pre_enable(intel_crtc, pipe_config, state);

	if (pipe_config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(pipe_config);
	} else {
		/* Without a PCH encoder FDI must stay completely off. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(state, pipe_config);
	intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		ironlake_pch_enable(state, pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(intel_crtc, pipe_config, state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (pipe_config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* The hardware is stable now; re-arm underrun reporting. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6546
6547 /* IPS only exists on ULT machines and is tied to pipe A. */
6548 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6549 {
6550         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6551 }
6552
6553 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6554                                             enum pipe pipe, bool apply)
6555 {
6556         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6557         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6558
6559         if (apply)
6560                 val |= mask;
6561         else
6562                 val &= ~mask;
6563
6564         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6565 }
6566
6567 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6568 {
6569         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6570         enum pipe pipe = crtc->pipe;
6571         u32 val;
6572
6573         val = MBUS_DBOX_A_CREDIT(2);
6574
6575         if (INTEL_GEN(dev_priv) >= 12) {
6576                 val |= MBUS_DBOX_BW_CREDIT(2);
6577                 val |= MBUS_DBOX_B_CREDIT(12);
6578         } else {
6579                 val |= MBUS_DBOX_BW_CREDIT(1);
6580                 val |= MBUS_DBOX_B_CREDIT(8);
6581         }
6582
6583         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6584 }
6585
/*
 * Modeset enable sequence for HSW+ (DDI) platforms: PLLs, transcoder
 * timings, pfit/scalers, LUTs, pipe enable and encoders, plus several
 * platform workarounds. The step ordering is mandated by the hardware
 * mode-set sequence; do not reorder.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already-active crtc is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);

	if (pipe_config->shared_dpll)
		intel_enable_shared_dpll(pipe_config);

	intel_encoders_pre_enable(intel_crtc, pipe_config, state);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	/* DSI transcoders program their own timings elsewhere. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_enable_trans_port_sync(pipe_config);

	intel_set_pipe_src_size(pipe_config);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		/* PIPE_MULT is written as multiplier - 1. */
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   pipe_config->pixel_multiplier - 1);
	}

	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(pipe_config);

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(pipe_config);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 pipe_config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(pipe_config);
	else
		ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(intel_crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		lpt_pch_enable(state, pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(intel_crtc, pipe_config, state);

	/* WA #1180 cleanup: needs one frame before re-enabling gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
6694
6695 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6696 {
6697         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6698         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6699         enum pipe pipe = crtc->pipe;
6700
6701         /* To avoid upsetting the power well on haswell only disable the pfit if
6702          * it's in use. The hw state code will make sure we get this right. */
6703         if (old_crtc_state->pch_pfit.enabled) {
6704                 I915_WRITE(PF_CTL(pipe), 0);
6705                 I915_WRITE(PF_WIN_POS(pipe), 0);
6706                 I915_WRITE(PF_WIN_SZ(pipe), 0);
6707         }
6708 }
6709
/*
 * Modeset disable sequence for ILK-style (PCH) platforms: encoders,
 * pipe, pfit, FDI and finally the PCH transcoder/PLL, in that order.
 * The teardown ordering is mandated by the hardware; do not reorder.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(intel_crtc, old_crtc_state, state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

	if (old_crtc_state->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		/* CPT-specific PCH cleanup: detach DP and the DPLL. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Pipe fully off; re-arm underrun reporting. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6768
/*
 * Modeset disable sequence for HSW+ (DDI) platforms: encoders, pipe,
 * port sync, DDI transcoder function, DSC, scalers/pfit, then the
 * post-disable/post-pll-disable encoder hooks. Ordering mandated by
 * the hardware mode-set sequence; do not reorder.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(intel_crtc, old_crtc_state, state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_disable_transcoder_port_sync(old_crtc_state);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	intel_dsc_disable(old_crtc_state);

	/* gen9+ uses pipe scalers instead of the legacy pfit. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
}
6803
/*
 * Program and enable the GMCH panel fitter from @crtc_state. No-op if
 * the state has no pfit control value.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* Ratios must be written before the control/enable register. */
	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
6826
6827 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6828 {
6829         if (phy == PHY_NONE)
6830                 return false;
6831
6832         if (IS_ELKHARTLAKE(dev_priv))
6833                 return phy <= PHY_C;
6834
6835         if (INTEL_GEN(dev_priv) >= 11)
6836                 return phy <= PHY_B;
6837
6838         return false;
6839 }
6840
6841 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6842 {
6843         if (INTEL_GEN(dev_priv) >= 12)
6844                 return phy >= PHY_D && phy <= PHY_I;
6845
6846         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6847                 return phy >= PHY_C && phy <= PHY_F;
6848
6849         return false;
6850 }
6851
/*
 * Map a DDI port to its PHY. Ports and PHYs share numbering, except
 * that on EHL port D is driven by PHY A.
 */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (IS_ELKHARTLAKE(i915) && port == PORT_D)
		return PHY_A;

	return (enum phy)port;
}
6859
6860 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6861 {
6862         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6863                 return PORT_TC_NONE;
6864
6865         if (INTEL_GEN(dev_priv) >= 12)
6866                 return port - PORT_D;
6867
6868         return port - PORT_C;
6869 }
6870
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are flagged via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
6893
/*
 * Map a digital port's AUX channel to the power domain needed for AUX
 * transfers. Type-C ports in TBT-alt mode use the dedicated *_TBT
 * domains; everything else uses the regular AUX domains. Unknown AUX
 * channels are flagged via MISSING_CASE with a safe fallback.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	/* TBT-alt mode routes AUX through the Thunderbolt domains. */
	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
6939
6940 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6941 {
6942         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6943         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6944         struct drm_encoder *encoder;
6945         enum pipe pipe = crtc->pipe;
6946         u64 mask;
6947         enum transcoder transcoder = crtc_state->cpu_transcoder;
6948
6949         if (!crtc_state->hw.active)
6950                 return 0;
6951
6952         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6953         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6954         if (crtc_state->pch_pfit.enabled ||
6955             crtc_state->pch_pfit.force_thru)
6956                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6957
6958         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
6959                                   crtc_state->uapi.encoder_mask) {
6960                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6961
6962                 mask |= BIT_ULL(intel_encoder->power_domain);
6963         }
6964
6965         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6966                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6967
6968         if (crtc_state->shared_dpll)
6969                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
6970
6971         return mask;
6972 }
6973
6974 static u64
6975 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6976 {
6977         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6978         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6979         enum intel_display_power_domain domain;
6980         u64 domains, new_domains, old_domains;
6981
6982         old_domains = crtc->enabled_power_domains;
6983         crtc->enabled_power_domains = new_domains =
6984                 get_crtc_power_domains(crtc_state);
6985
6986         domains = new_domains & ~old_domains;
6987
6988         for_each_power_domain(domain, domains)
6989                 intel_display_power_get(dev_priv, domain);
6990
6991         return old_domains & ~new_domains;
6992 }
6993
/*
 * Drop one power domain reference for every domain set in @domains;
 * the counterpart to the references taken during a modeset.
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}
7002
/*
 * Modeset enable sequence for VLV/CHV crtcs: program timings and
 * pipeconf, enable the PLL, run the encoder pre-enable hooks, then
 * enable the pipe and finally the encoders. The ordering of these
 * steps is deliberate and must not be rearranged.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc would be a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* CHV pipe B: select legacy blending and a zero canvas color. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);

	/* CHV and VLV have different PLL programming sequences. */
	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, pipe_config);
		chv_enable_pll(intel_crtc, pipe_config);
	} else {
		vlv_prepare_pll(intel_crtc, pipe_config);
		vlv_enable_pll(intel_crtc, pipe_config);
	}

	intel_encoders_pre_enable(intel_crtc, pipe_config, state);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	dev_priv->display.initial_watermarks(state, pipe_config);
	intel_enable_pipe(pipe_config);

	/* Vblank must have been off while the pipe was disabled. */
	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(intel_crtc, pipe_config, state);
}
7059
7060 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7061 {
7062         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7063         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7064
7065         I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
7066         I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
7067 }
7068
/*
 * Modeset enable sequence for gen2-4 style crtcs: PLL dividers,
 * timings and pipeconf first, then the PLL, panel fitter, color LUTs,
 * watermarks and finally the pipe and encoders. The step ordering is
 * deliberate and must not be rearranged.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc would be a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* FIFO underrun reporting is not wired up on gen2. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(intel_crtc, pipe_config, state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	/* Not all platforms provide an initial_watermarks() hook. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	/* Vblank must have been off while the pipe was disabled. */
	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(intel_crtc, pipe_config, state);
}
7119
7120 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7121 {
7122         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7123         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7124
7125         if (!old_crtc_state->gmch_pfit.control)
7126                 return;
7127
7128         assert_pipe_disabled(dev_priv, crtc->pipe);
7129
7130         DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
7131                       I915_READ(PFIT_CONTROL));
7132         I915_WRITE(PFIT_CONTROL, 0);
7133 }
7134
/*
 * Modeset disable sequence for gen2-4/VLV/CHV crtcs, run in roughly
 * the reverse order of the enable sequence: encoders, pipe, panel
 * fitter, then the PLL. The step ordering is deliberate.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = old_crtc_state->uapi.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(intel_crtc, old_crtc_state, state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

	/* DSI keeps its PLL running; everything else shuts it down. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);

	/* FIFO underrun reporting is not wired up on gen2. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7183
/*
 * Force a crtc off outside the normal atomic commit path, updating
 * just enough software state to stay consistent with the hardware.
 * NOTE(review): the caller is expected to hold all relevant modeset
 * locks already (see the -EDEADLK comment below) — confirm at call
 * sites.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off any planes still scanning out from this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/*
	 * Build a throwaway atomic state just to be able to call the
	 * platform crtc_disable() hook with the expected arguments.
	 */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(temp_crtc_state, to_intel_atomic_state(state));

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Clear the drm core and i915 bookkeeping for this crtc. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach all encoders that were routed to this crtc. */
	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Drop every power domain reference the crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	/* Reset the per-pipe cdclk/voltage and bandwidth tracking. */
	dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;

	bw_state->data_rate[intel_crtc->pipe] = 0;
	bw_state->num_active_planes[intel_crtc->pipe] = 0;
}
7262
7263 /*
7264  * turn all crtc's off, but do not adjust state
7265  * This has to be paired with a call to intel_modeset_setup_hw_state.
7266  */
7267 int intel_display_suspend(struct drm_device *dev)
7268 {
7269         struct drm_i915_private *dev_priv = to_i915(dev);
7270         struct drm_atomic_state *state;
7271         int ret;
7272
7273         state = drm_atomic_helper_suspend(dev);
7274         ret = PTR_ERR_OR_ZERO(state);
7275         if (ret)
7276                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7277         else
7278                 dev_priv->modeset_restore_state = state;
7279         return ret;
7280 }
7281
/*
 * Generic encoder destroy hook: clean up the drm core encoder state
 * and free the containing intel_encoder.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
7289
/*
 * Cross check the actual hw state with our own modeset state tracking
 * (and its internal consistency): an enabled connector must have an
 * active crtc and a matching encoder, and vice versa.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors have no fixed encoder/crtc link to check. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7328
7329 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7330 {
7331         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7332                 return crtc_state->fdi_lanes;
7333
7334         return 0;
7335 }
7336
/*
 * Validate the FDI lane configuration for @pipe. Besides the absolute
 * per-platform lane limits, on three-pipe parts pipes B and C share
 * FDI lanes, so the other pipe's requirements are cross-checked via
 * its atomic state. Returns 0 on success or a negative error code
 * (-EDEADLK may propagate from acquiring the other crtc's state).
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW cap out at 2 lanes; no sharing concerns beyond that. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no lane sharing between pipes. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B may use >2 lanes only while pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is limited to 2 lanes, shared with pipe B. */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7408
/* Returned to request a recompute after the pipe bpp was reduced. */
#define RETRY 1
/*
 * Compute the FDI lane count and link M/N values for the mode. If the
 * link cannot carry the configured bpp, reduce the pipe bpp in steps
 * of 2 bits per component and retry; RETRY tells the caller to rerun
 * the config computation with the reduced bpp.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	/* Too many lanes needed: trade off color depth for bandwidth. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7457
7458 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7459 {
7460         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7461         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7462
7463         /* IPS only exists on ULT machines and is tied to pipe A. */
7464         if (!hsw_crtc_supports_ips(crtc))
7465                 return false;
7466
7467         if (!i915_modparams.enable_ips)
7468                 return false;
7469
7470         if (crtc_state->pipe_bpp > 24)
7471                 return false;
7472
7473         /*
7474          * We compare against max which means we must take
7475          * the increased cdclk requirement into account when
7476          * calculating the new cdclk.
7477          *
7478          * Should measure whether using a lower cdclk w/o IPS
7479          */
7480         if (IS_BROADWELL(dev_priv) &&
7481             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7482                 return false;
7483
7484         return true;
7485 }
7486
7487 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7488 {
7489         struct drm_i915_private *dev_priv =
7490                 to_i915(crtc_state->uapi.crtc->dev);
7491         struct intel_atomic_state *intel_state =
7492                 to_intel_atomic_state(crtc_state->uapi.state);
7493
7494         if (!hsw_crtc_state_ips_capable(crtc_state))
7495                 return false;
7496
7497         /*
7498          * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7499          * enabled and disabled dynamically based on package C states,
7500          * user space can't make reliable use of the CRCs, so let's just
7501          * completely disable it.
7502          */
7503         if (crtc_state->crc_enabled)
7504                 return false;
7505
7506         /* IPS should be fine as long as at least one plane is enabled. */
7507         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7508                 return false;
7509
7510         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7511         if (IS_BROADWELL(dev_priv) &&
7512             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7513                 return false;
7514
7515         return true;
7516 }
7517
7518 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7519 {
7520         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7521
7522         /* GDG double wide on either pipe, otherwise pipe A only */
7523         return INTEL_GEN(dev_priv) < 4 &&
7524                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7525 }
7526
7527 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7528 {
7529         u32 pixel_rate;
7530
7531         pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
7532
7533         /*
7534          * We only use IF-ID interlacing. If we ever use
7535          * PF-ID we'll need to adjust the pixel_rate here.
7536          */
7537
7538         if (pipe_config->pch_pfit.enabled) {
7539                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7540                 u32 pfit_size = pipe_config->pch_pfit.size;
7541
7542                 pipe_w = pipe_config->pipe_src_w;
7543                 pipe_h = pipe_config->pipe_src_h;
7544
7545                 pfit_w = (pfit_size >> 16) & 0xFFFF;
7546                 pfit_h = pfit_size & 0xFFFF;
7547                 if (pipe_w < pfit_w)
7548                         pipe_w = pfit_w;
7549                 if (pipe_h < pfit_h)
7550                         pipe_h = pfit_h;
7551
7552                 if (WARN_ON(!pfit_w || !pfit_h))
7553                         return pixel_rate;
7554
7555                 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7556                                      pfit_w * pfit_h);
7557         }
7558
7559         return pixel_rate;
7560 }
7561
7562 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7563 {
7564         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7565
7566         if (HAS_GMCH(dev_priv))
7567                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7568                 crtc_state->pixel_rate =
7569                         crtc_state->hw.adjusted_mode.crtc_clock;
7570         else
7571                 crtc_state->pixel_rate =
7572                         ilk_pipe_pixel_rate(crtc_state);
7573 }
7574
/*
 * Validate and finalize the crtc configuration: dotclock limits
 * (including double wide mode on old platforms), YCBCR/CTM
 * interaction, even-width restrictions, the Cantiga+ hsync
 * workaround, the derived pixel rate and — for PCH encoders — the
 * FDI configuration. Returns 0 or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
7648
7649 static void
7650 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7651 {
7652         while (*num > DATA_LINK_M_N_MASK ||
7653                *den > DATA_LINK_M_N_MASK) {
7654                 *num >>= 1;
7655                 *den >>= 1;
7656         }
7657 }
7658
7659 static void compute_m_n(unsigned int m, unsigned int n,
7660                         u32 *ret_m, u32 *ret_n,
7661                         bool constant_n)
7662 {
7663         /*
7664          * Several DP dongles in particular seem to be fussy about
7665          * too large link M/N values. Give N value as 0x8000 that
7666          * should be acceptable by specific devices. 0x8000 is the
7667          * specified fixed N value for asynchronous clock mode,
7668          * which the devices expect also in synchronous clock mode.
7669          */
7670         if (constant_n)
7671                 *ret_n = 0x8000;
7672         else
7673                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7674
7675         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7676         intel_reduce_m_n_ratio(ret_m, ret_n);
7677 }
7678
7679 void
7680 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7681                        int pixel_clock, int link_clock,
7682                        struct intel_link_m_n *m_n,
7683                        bool constant_n, bool fec_enable)
7684 {
7685         u32 data_clock = bits_per_pixel * pixel_clock;
7686
7687         if (fec_enable)
7688                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
7689
7690         m_n->tu = 64;
7691         compute_m_n(data_clock,
7692                     link_clock * nlanes * 8,
7693                     &m_n->gmch_m, &m_n->gmch_n,
7694                     constant_n);
7695
7696         compute_m_n(pixel_clock, link_clock,
7697                     &m_n->link_m, &m_n->link_n,
7698                     constant_n);
7699 }
7700
7701 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
7702 {
7703         /*
7704          * There may be no VBT; and if the BIOS enabled SSC we can
7705          * just keep using it to avoid unnecessary flicker.  Whereas if the
7706          * BIOS isn't using it, don't assume it will work even if the VBT
7707          * indicates as much.
7708          */
7709         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
7710                 bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
7711                         DREF_SSC1_ENABLE;
7712
7713                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
7714                         DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
7715                                       enableddisabled(bios_lvds_use_ssc),
7716                                       enableddisabled(dev_priv->vbt.lvds_use_ssc));
7717                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
7718                 }
7719         }
7720 }
7721
7722 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7723 {
7724         if (i915_modparams.panel_use_ssc >= 0)
7725                 return i915_modparams.panel_use_ssc != 0;
7726         return dev_priv->vbt.lvds_use_ssc
7727                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7728 }
7729
7730 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7731 {
7732         return (1 << dpll->n) << 16 | dpll->m2;
7733 }
7734
7735 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7736 {
7737         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7738 }
7739
7740 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7741                                      struct intel_crtc_state *crtc_state,
7742                                      struct dpll *reduced_clock)
7743 {
7744         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7745         u32 fp, fp2 = 0;
7746
7747         if (IS_PINEVIEW(dev_priv)) {
7748                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7749                 if (reduced_clock)
7750                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7751         } else {
7752                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7753                 if (reduced_clock)
7754                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7755         }
7756
7757         crtc_state->dpll_hw_state.fp0 = fp;
7758
7759         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7760             reduced_clock) {
7761                 crtc_state->dpll_hw_state.fp1 = fp2;
7762         } else {
7763                 crtc_state->dpll_hw_state.fp1 = fp;
7764         }
7765 }
7766
/*
 * Work around PLLB opamp mis-calibration on VLV: force-enable the
 * opamp and program a sane bias via sideband (DPIO) register writes.
 * Called from vlv_prepare_pll() for pipe B only.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
                pipe)
{
        u32 reg_val;

        /*
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        /*
         * NOTE(review): the 0x8c/0xb0 values in REF_DW13 below appear to
         * come from the DPIO/VBIOS notes doc referenced elsewhere in this
         * file — magic sequence, do not reorder these writes.
         */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0x8c000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

        /* Clear the low byte of PLL_DW9 again after updating REF_DW13. */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7795
/*
 * Program the PCH transcoder M1/N1 data and link ratio registers for
 * @crtc_state's pipe from the precomputed values in @m_n.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* The TU size field shares the DATA_M register with the M value. */
        I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
        I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
        I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
        I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7808
7809 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7810                                  enum transcoder transcoder)
7811 {
7812         if (IS_HASWELL(dev_priv))
7813                 return transcoder == TRANSCODER_EDP;
7814
7815         /*
7816          * Strictly speaking some registers are available before
7817          * gen7, but we only support DRRS on gen7+
7818          */
7819         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7820 }
7821
/*
 * Program the CPU transcoder M/N registers for @crtc_state.  On gen5+
 * the per-transcoder M1/N1 registers are used (and M2/N2 when DRRS is
 * enabled and the transcoder has them); pre-gen5 (G4X) uses the
 * per-pipe register variants instead.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n,
                                         const struct intel_link_m_n *m2_n2)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (INTEL_GEN(dev_priv) >= 5) {
                I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
                I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
                /*
                 *  M2_N2 registers are set only if DRRS is supported
                 * (to make sure the registers are not unnecessarily accessed).
                 */
                if (m2_n2 && crtc_state->has_drrs &&
                    transcoder_has_m2_n2(dev_priv, transcoder)) {
                        I915_WRITE(PIPE_DATA_M2(transcoder),
                                        TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
                        I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
                        I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
                        I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
                }
        } else {
                /* Pre-ILK: M/N registers are per-pipe, not per-transcoder. */
                I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
                I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
        }
}
7855
7856 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7857 {
7858         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7859
7860         if (m_n == M1_N1) {
7861                 dp_m_n = &crtc_state->dp_m_n;
7862                 dp_m2_n2 = &crtc_state->dp_m2_n2;
7863         } else if (m_n == M2_N2) {
7864
7865                 /*
7866                  * M2_N2 registers are not supported. Hence m2_n2 divider value
7867                  * needs to be programmed into M1_N1.
7868                  */
7869                 dp_m_n = &crtc_state->dp_m2_n2;
7870         } else {
7871                 DRM_ERROR("Unsupported divider value\n");
7872                 return;
7873         }
7874
7875         if (crtc_state->has_pch_encoder)
7876                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7877         else
7878                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7879 }
7880
7881 static void vlv_compute_dpll(struct intel_crtc *crtc,
7882                              struct intel_crtc_state *pipe_config)
7883 {
7884         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7885                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7886         if (crtc->pipe != PIPE_A)
7887                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7888
7889         /* DPLL not used with DSI, but still need the rest set up */
7890         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7891                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7892                         DPLL_EXT_BUFFER_ENABLE_VLV;
7893
7894         pipe_config->dpll_hw_state.dpll_md =
7895                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7896 }
7897
7898 static void chv_compute_dpll(struct intel_crtc *crtc,
7899                              struct intel_crtc_state *pipe_config)
7900 {
7901         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7902                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7903         if (crtc->pipe != PIPE_A)
7904                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7905
7906         /* DPLL not used with DSI, but still need the rest set up */
7907         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7908                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7909
7910         pipe_config->dpll_hw_state.dpll_md =
7911                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7912 }
7913
/*
 * Program the VLV DPLL dividers and analog tuning values over DPIO for
 * @crtc's pipe, from the precomputed state in @pipe_config.  Must run
 * before the PLL is enabled.  With DSI only the refclk is set up and
 * the rest of the sequence is skipped.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
        u32 coreclk, reg_val;

        /* Enable Refclk */
        I915_WRITE(DPLL(pipe),
                   pipe_config->dpll_hw_state.dpll &
                   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

        /* No need to actually set up the DPLL with DSI */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);

        bestn = pipe_config->dpll.n;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;

        /* See eDP HDMI DPIO driver vbios notes doc */

        /* PLL B needs special handling */
        if (pipe == PIPE_B)
                vlv_pllb_recal_opamp(dev_priv, pipe);

        /* Set up Tx target for periodic Rcomp update */
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

        /* Disable target IRef on PLL */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
        reg_val &= 0x00ffffff;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

        /* Disable fast lock */
        vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

        /* Set idtafcrecal before PLL is enabled */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
        mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
        mdiv |= ((bestn << DPIO_N_SHIFT));
        mdiv |= (1 << DPIO_K_SHIFT);

        /*
         * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
         * but we don't support that).
         * Note: don't use the DAC post divider as it seems unstable.
         */
        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Write dividers first, then re-write with calibration enabled. */
        mdiv |= DPIO_ENABLE_CALIBRATION;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Set HBR and RBR LPF coefficients */
        if (pipe_config->port_clock == 162000 ||
            intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
            intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);

        if (intel_crtc_has_dp_encoder(pipe_config)) {
                /* Use SSC source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
        } else { /* HDMI or VGA */
                /* Use bend source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
        }

        /*
         * NOTE(review): the core clock constants below look like VBIOS
         * magic (see the notes doc mentioned above) — do not change.
         */
        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
        if (intel_crtc_has_dp_encoder(pipe_config))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

        vlv_dpio_put(dev_priv);
}
8013
/*
 * Program the CHV DPLL dividers, loop filter and lock-detect settings
 * over DPIO for @crtc's pipe from the precomputed @pipe_config.  With
 * DSI only the refclk/SSC enable is done and the rest is skipped.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 loopfilter, tribuf_calcntr;
        u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
        u32 dpio_val;
        int vco;

        /* Enable Refclk and SSC */
        I915_WRITE(DPLL(pipe),
                   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

        /* No need to actually set up the DPLL with DSI */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* dpll.m2 carries the integer part above bit 22, fraction below. */
        bestn = pipe_config->dpll.n;
        bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2 >> 22;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;
        vco = pipe_config->dpll.vco;
        dpio_val = 0;
        loopfilter = 0;

        vlv_dpio_get(dev_priv);

        /* p1 and p2 divider */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
                        5 << DPIO_CHV_S1_DIV_SHIFT |
                        bestp1 << DPIO_CHV_P1_DIV_SHIFT |
                        bestp2 << DPIO_CHV_P2_DIV_SHIFT |
                        1 << DPIO_CHV_K_DIV_SHIFT);

        /* Feedback post-divider - m2 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

        /* Feedback refclk divider - n and m1 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
                        DPIO_CHV_M1_DIV_BY_2 |
                        1 << DPIO_CHV_N_DIV_SHIFT);

        /* M2 fraction division */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

        /* M2 fraction division enable (only when a fraction is in use) */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
        dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
        if (bestm2_frac)
                dpio_val |= DPIO_CHV_FRAC_DIV_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

        /* Program digital lock detect threshold */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
        dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
                                        DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
        dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
        /* Coarse threshold only applies when no m2 fraction is used. */
        if (!bestm2_frac)
                dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

        /* Loop filter coefficients are selected by VCO frequency range. */
        if (vco == 5400000) {
                loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x9;
        } else if (vco <= 6200000) {
                loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x9;
        } else if (vco <= 6480000) {
                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x8;
        } else {
                /* Not supported. Apply the same limits as in the max case */
                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0;
        }
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

        /* Tri-buffer calibration count, matched to the loop filter above */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
        dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
        dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

        /* AFC Recal */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
                        vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
                        DPIO_AFC_RECAL);

        vlv_dpio_put(dev_priv);
}
8118
8119 /**
8120  * vlv_force_pll_on - forcibly enable just the PLL
8121  * @dev_priv: i915 private structure
8122  * @pipe: pipe PLL to enable
8123  * @dpll: PLL configuration
8124  *
8125  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8126  * in cases where we need the PLL enabled even when @pipe is not going to
8127  * be enabled.
8128  */
8129 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8130                      const struct dpll *dpll)
8131 {
8132         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8133         struct intel_crtc_state *pipe_config;
8134
8135         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8136         if (!pipe_config)
8137                 return -ENOMEM;
8138
8139         pipe_config->uapi.crtc = &crtc->base;
8140         pipe_config->pixel_multiplier = 1;
8141         pipe_config->dpll = *dpll;
8142
8143         if (IS_CHERRYVIEW(dev_priv)) {
8144                 chv_compute_dpll(crtc, pipe_config);
8145                 chv_prepare_pll(crtc, pipe_config);
8146                 chv_enable_pll(crtc, pipe_config);
8147         } else {
8148                 vlv_compute_dpll(crtc, pipe_config);
8149                 vlv_prepare_pll(crtc, pipe_config);
8150                 vlv_enable_pll(crtc, pipe_config);
8151         }
8152
8153         kfree(pipe_config);
8154
8155         return 0;
8156 }
8157
8158 /**
8159  * vlv_force_pll_off - forcibly disable just the PLL
8160  * @dev_priv: i915 private structure
8161  * @pipe: pipe PLL to disable
8162  *
8163  * Disable the PLL for @pipe. To be used in cases where we need
8164  * the PLL enabled even when @pipe is not going to be enabled.
8165  */
8166 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8167 {
8168         if (IS_CHERRYVIEW(dev_priv))
8169                 chv_disable_pll(dev_priv, pipe);
8170         else
8171                 vlv_disable_pll(dev_priv, pipe);
8172 }
8173
/*
 * Compute the i9xx-family DPLL control register value (and DPLL_MD on
 * gen4+) for @crtc_state, storing the result in
 * crtc_state->dpll_hw_state.  @reduced_clock optionally supplies a
 * downclocked divider set (programmed into FP1 for LVDS, see
 * i9xx_update_pll_dividers()).
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
                              struct dpll *reduced_clock)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 dpll;
        struct dpll *clock = &crtc_state->dpll;

        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

        dpll = DPLL_VGA_MODE_DIS;

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;

        /* These platforms carry the pixel multiplier in the DPLL itself. */
        if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
            IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                dpll |= (crtc_state->pixel_multiplier - 1)
                        << SDVO_MULTIPLIER_SHIFT_HIRES;
        }

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                dpll |= DPLL_SDVO_HIGH_SPEED;

        if (intel_crtc_has_dp_encoder(crtc_state))
                dpll |= DPLL_SDVO_HIGH_SPEED;

        /* compute bitmask from p1 value */
        if (IS_PINEVIEW(dev_priv))
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
        else {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                /* G4X also programs the reduced clock's P1 for downclocking */
                if (IS_G4X(dev_priv) && reduced_clock)
                        dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
        }
        switch (clock->p2) {
        case 5:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                break;
        case 7:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
                break;
        case 10:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
                break;
        case 14:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
                break;
        }
        if (INTEL_GEN(dev_priv) >= 4)
                dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

        /* Reference clock select: TV clock, SSC (LVDS panels), or DREF. */
        if (crtc_state->sdvo_tv_clock)
                dpll |= PLL_REF_INPUT_TVCLKINBC;
        else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        dpll |= DPLL_VCO_ENABLE;
        crtc_state->dpll_hw_state.dpll = dpll;

        /* Gen4+ has a separate MD register for the pixel multiplier. */
        if (INTEL_GEN(dev_priv) >= 4) {
                u32 dpll_md = (crtc_state->pixel_multiplier - 1)
                        << DPLL_MD_UDI_MULTIPLIER_SHIFT;
                crtc_state->dpll_hw_state.dpll_md = dpll_md;
        }
}
8246
/*
 * Compute the gen2 (i8xx) DPLL control register value for @crtc_state,
 * storing the result in crtc_state->dpll_hw_state.dpll.
 * @reduced_clock optionally supplies a downclocked divider set
 * (handled by i9xx_update_pll_dividers()).
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
                              struct dpll *reduced_clock)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll;
        struct dpll *clock = &crtc_state->dpll;

        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

        dpll = DPLL_VGA_MODE_DIS;

        /* P1 encoding differs between LVDS (one-hot) and other outputs. */
        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
                if (clock->p1 == 2)
                        dpll |= PLL_P1_DIVIDE_BY_TWO;
                else
                        dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                if (clock->p2 == 4)
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }

        /*
         * Bspec:
         * "[Almador Errata}: For the correct operation of the muxed DVO pins
         *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
         *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
         *  Enable) must be set to “1” in both the DPLL A Control Register
         *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
         *
         * For simplicity We simply keep both bits always enabled in
         * both DPLLS. The spec says we should disable the DVO 2X clock
         * when not needed, but this seems to work fine in practice.
         */
        if (IS_I830(dev_priv) ||
            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
                dpll |= DPLL_DVO_2X_MODE;

        /* Reference clock select: SSC for LVDS panels, otherwise DREF. */
        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        dpll |= DPLL_VCO_ENABLE;
        crtc_state->dpll_hw_state.dpll = dpll;
}
8296
/*
 * Program the CPU transcoder H/V timing registers from
 * @crtc_state's adjusted mode.  Interlaced modes need the vertical
 * total/blank-end trimmed by one line (the hardware adds the two
 * halflines itself) and a vsync shift computed per output type.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
        u32 crtc_vtotal, crtc_vblank_end;
        int vsyncshift = 0;

        /* We need to be careful not to changed the adjusted mode, for otherwise
         * the hw state checker will get angry at the mismatch. */
        crtc_vtotal = adjusted_mode->crtc_vtotal;
        crtc_vblank_end = adjusted_mode->crtc_vblank_end;

        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                /* the chip adds 2 halflines automatically */
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;

                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
                                adjusted_mode->crtc_htotal / 2;
                if (vsyncshift < 0)
                        vsyncshift += adjusted_mode->crtc_htotal;
        }

        if (INTEL_GEN(dev_priv) > 3)
                I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

        /* All timing registers store (value - 1); end/total in high word. */
        I915_WRITE(HTOTAL(cpu_transcoder),
                   (adjusted_mode->crtc_hdisplay - 1) |
                   ((adjusted_mode->crtc_htotal - 1) << 16));
        I915_WRITE(HBLANK(cpu_transcoder),
                   (adjusted_mode->crtc_hblank_start - 1) |
                   ((adjusted_mode->crtc_hblank_end - 1) << 16));
        I915_WRITE(HSYNC(cpu_transcoder),
                   (adjusted_mode->crtc_hsync_start - 1) |
                   ((adjusted_mode->crtc_hsync_end - 1) << 16));

        I915_WRITE(VTOTAL(cpu_transcoder),
                   (adjusted_mode->crtc_vdisplay - 1) |
                   ((crtc_vtotal - 1) << 16));
        I915_WRITE(VBLANK(cpu_transcoder),
                   (adjusted_mode->crtc_vblank_start - 1) |
                   ((crtc_vblank_end - 1) << 16));
        I915_WRITE(VSYNC(cpu_transcoder),
                   (adjusted_mode->crtc_vsync_start - 1) |
                   ((adjusted_mode->crtc_vsync_end - 1) << 16));

        /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
         * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
         * documented on the DDI_FUNC_CTL register description, EDP Input Select
         * bits. */
        if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
            (pipe == PIPE_B || pipe == PIPE_C))
                I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
8358
8359 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8360 {
8361         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8362         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8363         enum pipe pipe = crtc->pipe;
8364
8365         /* pipesrc controls the size that is scaled from, which should
8366          * always be the user's requested size.
8367          */
8368         I915_WRITE(PIPESRC(pipe),
8369                    ((crtc_state->pipe_src_w - 1) << 16) |
8370                    (crtc_state->pipe_src_h - 1));
8371 }
8372
8373 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8374 {
8375         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8376         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8377
8378         if (IS_GEN(dev_priv, 2))
8379                 return false;
8380
8381         if (INTEL_GEN(dev_priv) >= 9 ||
8382             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8383                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8384         else
8385                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8386 }
8387
/*
 * Read back the CPU transcoder timing registers into
 * @pipe_config->hw.adjusted_mode.  Registers store (value - 1) with
 * the end/total half in the high word; DSI transcoders have no
 * H/V blank registers.  Interlaced readout re-adds the halfline the
 * hardware inserts (see intel_set_pipe_timings()).
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        u32 tmp;

        tmp = I915_READ(HTOTAL(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

        if (!transcoder_is_dsi(cpu_transcoder)) {
                tmp = I915_READ(HBLANK(cpu_transcoder));
                pipe_config->hw.adjusted_mode.crtc_hblank_start =
                                                        (tmp & 0xffff) + 1;
                pipe_config->hw.adjusted_mode.crtc_hblank_end =
                                                ((tmp >> 16) & 0xffff) + 1;
        }
        tmp = I915_READ(HSYNC(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

        tmp = I915_READ(VTOTAL(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

        if (!transcoder_is_dsi(cpu_transcoder)) {
                tmp = I915_READ(VBLANK(cpu_transcoder));
                pipe_config->hw.adjusted_mode.crtc_vblank_start =
                                                        (tmp & 0xffff) + 1;
                pipe_config->hw.adjusted_mode.crtc_vblank_end =
                                                ((tmp >> 16) & 0xffff) + 1;
        }
        tmp = I915_READ(VSYNC(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

        /* Undo the halfline the hardware adds for interlaced modes. */
        if (intel_pipe_is_interlaced(pipe_config)) {
                pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
                pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
                pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
        }
}
8432
8433 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8434                                     struct intel_crtc_state *pipe_config)
8435 {
8436         struct drm_device *dev = crtc->base.dev;
8437         struct drm_i915_private *dev_priv = to_i915(dev);
8438         u32 tmp;
8439
8440         tmp = I915_READ(PIPESRC(crtc->pipe));
8441         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8442         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8443
8444         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8445         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8446 }
8447
8448 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8449                                  struct intel_crtc_state *pipe_config)
8450 {
8451         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8452         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8453         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8454         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8455
8456         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8457         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8458         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8459         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8460
8461         mode->flags = pipe_config->hw.adjusted_mode.flags;
8462         mode->type = DRM_MODE_TYPE_DRIVER;
8463
8464         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8465
8466         mode->hsync = drm_mode_hsync(mode);
8467         mode->vrefresh = drm_mode_vrefresh(mode);
8468         drm_mode_set_name(mode);
8469 }
8470
/*
 * Program PIPECONF for gmch platforms from the computed crtc state:
 * double wide mode, bpc/dither (g4x+ only), interlace mode, limited
 * color range (vlv/chv only) and gamma mode.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 pipeconf;

        pipeconf = 0;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

        if (crtc_state->double_wide)
                pipeconf |= PIPECONF_DOUBLE_WIDE;

        /* only g4x and later have fancy bpc/dither controls */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                /* Bspec claims that we can't use dithering for 30bpp pipes. */
                if (crtc_state->dither && crtc_state->pipe_bpp != 30)
                        pipeconf |= PIPECONF_DITHER_EN |
                                    PIPECONF_DITHER_TYPE_SP;

                switch (crtc_state->pipe_bpp) {
                case 18:
                        pipeconf |= PIPECONF_6BPC;
                        break;
                case 24:
                        pipeconf |= PIPECONF_8BPC;
                        break;
                case 30:
                        pipeconf |= PIPECONF_10BPC;
                        break;
                default:
                        /* Case prevented by intel_choose_pipe_bpp_dither. */
                        BUG();
                }
        }

        if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                /* Pre-gen4 and SDVO need the field indication variant. */
                if (INTEL_GEN(dev_priv) < 4 ||
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                else
                        pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
        } else {
                pipeconf |= PIPECONF_PROGRESSIVE;
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
             crtc_state->limited_color_range)
                pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

        pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

        I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
        POSTING_READ(PIPECONF(crtc->pipe));
}
8529
8530 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8531                                    struct intel_crtc_state *crtc_state)
8532 {
8533         struct drm_device *dev = crtc->base.dev;
8534         struct drm_i915_private *dev_priv = to_i915(dev);
8535         const struct intel_limit *limit;
8536         int refclk = 48000;
8537
8538         memset(&crtc_state->dpll_hw_state, 0,
8539                sizeof(crtc_state->dpll_hw_state));
8540
8541         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8542                 if (intel_panel_use_ssc(dev_priv)) {
8543                         refclk = dev_priv->vbt.lvds_ssc_freq;
8544                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8545                 }
8546
8547                 limit = &intel_limits_i8xx_lvds;
8548         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8549                 limit = &intel_limits_i8xx_dvo;
8550         } else {
8551                 limit = &intel_limits_i8xx_dac;
8552         }
8553
8554         if (!crtc_state->clock_set &&
8555             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8556                                  refclk, NULL, &crtc_state->dpll)) {
8557                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8558                 return -EINVAL;
8559         }
8560
8561         i8xx_compute_dpll(crtc, crtc_state, NULL);
8562
8563         return 0;
8564 }
8565
/*
 * Compute DPLL settings for g4x crtcs.  Selects the PLL limits table
 * from the attached output type (dual/single channel LVDS, HDMI or
 * analog, SDVO, with the i9xx SDVO limits as fallback), optionally
 * using the VBT SSC reference clock for LVDS.
 *
 * Returns 0 on success, -EINVAL if no dividers satisfy the limits.
 */
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_limit *limit;
        int refclk = 96000;

        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
                }

                if (intel_is_dual_link_lvds(dev_priv))
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
                        limit = &intel_limits_g4x_single_channel_lvds;
        } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
                limit = &intel_limits_g4x_hdmi;
        } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
                limit = &intel_limits_g4x_sdvo;
        } else {
                /* The option is for other outputs */
                limit = &intel_limits_i9xx_sdvo;
        }

        /* Skip the search if userspace/bios already provided dividers. */
        if (!crtc_state->clock_set &&
            !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
                                refclk, NULL, &crtc_state->dpll)) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        }

        i9xx_compute_dpll(crtc, crtc_state, NULL);

        return 0;
}
8607
8608 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8609                                   struct intel_crtc_state *crtc_state)
8610 {
8611         struct drm_device *dev = crtc->base.dev;
8612         struct drm_i915_private *dev_priv = to_i915(dev);
8613         const struct intel_limit *limit;
8614         int refclk = 96000;
8615
8616         memset(&crtc_state->dpll_hw_state, 0,
8617                sizeof(crtc_state->dpll_hw_state));
8618
8619         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8620                 if (intel_panel_use_ssc(dev_priv)) {
8621                         refclk = dev_priv->vbt.lvds_ssc_freq;
8622                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8623                 }
8624
8625                 limit = &intel_limits_pineview_lvds;
8626         } else {
8627                 limit = &intel_limits_pineview_sdvo;
8628         }
8629
8630         if (!crtc_state->clock_set &&
8631             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8632                                 refclk, NULL, &crtc_state->dpll)) {
8633                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8634                 return -EINVAL;
8635         }
8636
8637         i9xx_compute_dpll(crtc, crtc_state, NULL);
8638
8639         return 0;
8640 }
8641
/*
 * Compute DPLL settings for i9xx crtcs.  Uses the i9xx LVDS limits
 * (with optional VBT SSC reference clock) for LVDS outputs and the
 * i9xx SDVO limits otherwise.
 *
 * Returns 0 on success, -EINVAL if no dividers satisfy the limits.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
                                   struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct intel_limit *limit;
        int refclk = 96000;

        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
                }

                limit = &intel_limits_i9xx_lvds;
        } else {
                limit = &intel_limits_i9xx_sdvo;
        }

        /* Skip the search if the state already carries dividers. */
        if (!crtc_state->clock_set &&
            !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
                                 refclk, NULL, &crtc_state->dpll)) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        }

        i9xx_compute_dpll(crtc, crtc_state, NULL);

        return 0;
}
8675
8676 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8677                                   struct intel_crtc_state *crtc_state)
8678 {
8679         int refclk = 100000;
8680         const struct intel_limit *limit = &intel_limits_chv;
8681
8682         memset(&crtc_state->dpll_hw_state, 0,
8683                sizeof(crtc_state->dpll_hw_state));
8684
8685         if (!crtc_state->clock_set &&
8686             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8687                                 refclk, NULL, &crtc_state->dpll)) {
8688                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8689                 return -EINVAL;
8690         }
8691
8692         chv_compute_dpll(crtc, crtc_state);
8693
8694         return 0;
8695 }
8696
8697 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8698                                   struct intel_crtc_state *crtc_state)
8699 {
8700         int refclk = 100000;
8701         const struct intel_limit *limit = &intel_limits_vlv;
8702
8703         memset(&crtc_state->dpll_hw_state, 0,
8704                sizeof(crtc_state->dpll_hw_state));
8705
8706         if (!crtc_state->clock_set &&
8707             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8708                                 refclk, NULL, &crtc_state->dpll)) {
8709                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8710                 return -EINVAL;
8711         }
8712
8713         vlv_compute_dpll(crtc, crtc_state);
8714
8715         return 0;
8716 }
8717
8718 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8719 {
8720         if (IS_I830(dev_priv))
8721                 return false;
8722
8723         return INTEL_GEN(dev_priv) >= 4 ||
8724                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8725 }
8726
/*
 * Read back the gmch panel fitter state into the crtc state.  The
 * config is only recorded when the pfit exists, is enabled, and is
 * attached to this crtc's pipe; on pre-gen4 hardware the pfit is
 * only accepted on pipe B.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 tmp;

        if (!i9xx_has_pfit(dev_priv))
                return;

        tmp = I915_READ(PFIT_CONTROL);
        if (!(tmp & PFIT_ENABLE))
                return;

        /* Check whether the pfit is attached to our pipe. */
        if (INTEL_GEN(dev_priv) < 4) {
                if (crtc->pipe != PIPE_B)
                        return;
        } else {
                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
                        return;
        }

        pipe_config->gmch_pfit.control = tmp;
        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
8752
/*
 * Compute the port clock for a VLV pipe by reading the PLL divider
 * fields (m1/m2/n/p1/p2) back through the DPIO sideband and running
 * them through vlv_calc_dpll_params() with the fixed 100 MHz refclk.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000;

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        vlv_dpio_put(dev_priv);

        /* Unpack the divider fields from the single PLL_DW3 register. */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8779
/*
 * Read back the primary plane state programmed by the BIOS so the
 * boot framebuffer can be inherited.  Allocates an intel_framebuffer
 * describing what the hardware currently scans out (format, tiling,
 * rotation, base, size, stride) and stores it in plane_config->fb.
 * Bails out silently if the plane is disabled or allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = I915_READ(DSPCNTR(i9xx_plane));

        /* Tiling and rotation readout only exists on gen4+. */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }

                if (val & DISPPLANE_ROTATE_180)
                        plane_config->rotation = DRM_MODE_ROTATE_180;
        }

        /* CHV pipe B additionally supports horizontal mirroring. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
            val & DISPPLANE_MIRROR)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /* The base/offset register layout differs per platform generation. */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = I915_READ(DSPOFFSET(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(i9xx_plane));
                else
                        offset = I915_READ(DSPLINOFF(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = I915_READ(DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* fb size is taken from the pipe source size (width-1/height-1). */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      crtc->base.name, plane->base.name, fb->width, fb->height,
                      fb->format->cpp[0] * 8, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
}
8862
/*
 * Compute the port clock for a CHV pipe by reading the PLL dividers
 * back through the DPIO sideband.  m2 is reassembled as a 10.22
 * fixed-point value: the integer part from PLL_DW0 shifted up by 22,
 * with the fractional part from PLL_DW2 OR'd in when the fractional
 * divider is enabled.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        vlv_dpio_put(dev_priv);

        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8896
/*
 * Decode the pipe output color format (RGB, YCbCr 4:4:4 or 4:2:0)
 * from the PIPEMISC register.  Warns if 4:2:0 is enabled without
 * full blend mode, the only 4:2:0 configuration the driver supports.
 */
static enum intel_output_format
bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 tmp;

        tmp = I915_READ(PIPEMISC(crtc->pipe));

        if (tmp & PIPEMISC_YUV420_ENABLE) {
                /* We support 4:2:0 in full blend mode only */
                WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);

                return INTEL_OUTPUT_FORMAT_YCBCR420;
        } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
                return INTEL_OUTPUT_FORMAT_YCBCR444;
        } else {
                return INTEL_OUTPUT_FORMAT_RGB;
        }
}
8916
/*
 * Read the gamma/CSC enable bits back from the primary plane control
 * register (DSPCNTR) into the crtc state.  The CSC enable bit is only
 * meaningful on non-GMCH platforms.
 */
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        u32 tmp;

        tmp = I915_READ(DSPCNTR(i9xx_plane));

        if (tmp & DISPPLANE_GAMMA_ENABLE)
                crtc_state->gamma_enable = true;

        if (!HAS_GMCH(dev_priv) &&
            tmp & DISPPLANE_PIPE_CSC_ENABLE)
                crtc_state->csc_enable = true;
}
8934
/*
 * Read back the full hardware state of an i9xx-style pipe into
 * pipe_config: pipe enable, bpc, color range, gamma/CSC, timings,
 * source size, panel fitter, pixel multiplier and DPLL state.
 *
 * Takes a power domain wakeref for the pipe for the duration of the
 * readout; returns false if the power well is off or the pipe is
 * disabled, true on successful readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        /* Defaults for fields that have no readout on these platforms. */
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
        pipe_config->master_transcoder = INVALID_TRANSCODER;

        ret = false;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only g4x+ have the bpc bits in PIPECONF. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Pixel multiplier readout differs per generation. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
9053
/*
 * Initialize the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake.  Scans the encoders and DPLLs to determine which clock
 * sources are needed (LVDS/eDP panels, CK505 clock chip, SSC), then
 * computes the desired final register value and steps the hardware
 * toward it one source at a time, with the posting reads and 200 us
 * delays the sequence requires.  Returns early if the register is
 * already in the desired state.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /* On IBX the CK505 external clock chip (per VBT) gates SSC use. */
        if (HAS_PCH_IBX(dev_priv)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                u32 temp = I915_READ(PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                      has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = I915_READ(PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                /* Keep SSC alive for any DPLL that's already consuming it. */
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                DRM_DEBUG_KMS("Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
                DRM_DEBUG_KMS("Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                if (!using_ssc_source) {
                        DRM_DEBUG_KMS("Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        I915_WRITE(PCH_DREF_CONTROL, val);
                        POSTING_READ(PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        /* The stepwise sequence must have landed on the precomputed state. */
        BUG_ON(val != final);
}
9220
/*
 * Pulse the FDI mPHY reset over the IOSF sideband: assert the reset
 * control bit in SOUTH_CHICKEN2, wait (up to 100us) for the status bit
 * to reflect it, then de-assert and wait for the status bit to clear.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	/* Assert the mPHY IOSF sideband reset. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	/* De-assert the reset and wait for the status to follow. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
9241
/*
 * WaMPhyProgramming:hsw
 *
 * Program the FDI mPHY over the IOSF sideband. The register offsets and
 * values below are magic numbers taken verbatim from the workaround;
 * each setting is applied at a 0x20xx offset and mirrored at the
 * corresponding 0x21xx offset (presumably one per mPHY channel — the
 * workaround does not name them; do not change the order or values).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9316
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible parameter combinations instead of programming them. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable the SSC block, but keep the alternate (PATHALT) path selected. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	/* Settle time before switching paths (value from the BSpec sequence). */
	udelay(24);

	if (with_spread) {
		/* Drop PATHALT to switch over to the spread clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Set the buffer-enable config bit; the register differs on LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9361
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Clear the buffer-enable config bit; the register differs on LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/*
	 * If the SSC block is still running, first route the clock through
	 * the alternate (PATHALT) path, then disable the block entirely.
	 */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9387
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE low-word values for each supported clock bend
 * step (-50..+50, in steps of 5), indexed via BEND_IDX(). Adjacent
 * +/-5 step pairs share a divider value; lpt_bend_clkout_dp() writes
 * a dither phase for odd multiples of 5 to distinguish them.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9413
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	/* Only multiples of 5 are representable in the divider table. */
	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * Odd multiples of 5 share a divider entry with the neighboring
	 * even step; the dither phase pattern presumably realizes the
	 * intermediate value.
	 */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Update only the low 16 bits of SSCDIVINTPHASE from the table. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9446
9447 #undef BEND_IDX
9448
9449 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9450 {
9451         u32 fuse_strap = I915_READ(FUSE_STRAP);
9452         u32 ctl = I915_READ(SPLL_CTL);
9453
9454         if ((ctl & SPLL_PLL_ENABLE) == 0)
9455                 return false;
9456
9457         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9458             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9459                 return true;
9460
9461         if (IS_BROADWELL(dev_priv) &&
9462             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9463                 return true;
9464
9465         return false;
9466 }
9467
9468 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9469                                enum intel_dpll_id id)
9470 {
9471         u32 fuse_strap = I915_READ(FUSE_STRAP);
9472         u32 ctl = I915_READ(WRPLL_CTL(id));
9473
9474         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9475                 return false;
9476
9477         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9478                 return true;
9479
9480         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9481             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9482             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9483                 return true;
9484
9485         return false;
9486 }
9487
9488 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9489 {
9490         struct intel_encoder *encoder;
9491         bool has_fdi = false;
9492
9493         for_each_intel_encoder(&dev_priv->drm, encoder) {
9494                 switch (encoder->type) {
9495                 case INTEL_OUTPUT_ANALOG:
9496                         has_fdi = true;
9497                         break;
9498                 default:
9499                         break;
9500                 }
9501         }
9502
9503         /*
9504          * The BIOS may have decided to use the PCH SSC
9505          * reference so we must not disable it until the
9506          * relevant PLLs have stopped relying on it. We'll
9507          * just leave the PCH SSC reference enabled in case
9508          * any active PLL is using it. It will get disabled
9509          * after runtime suspend if we don't have FDI.
9510          *
9511          * TODO: Move the whole reference clock handling
9512          * to the modeset sequence proper so that we can
9513          * actually enable/disable/reconfigure these things
9514          * safely. To do that we need to introduce a real
9515          * clock hierarchy. That would also allow us to do
9516          * clock bending finally.
9517          */
9518         dev_priv->pch_ssc_use = 0;
9519
9520         if (spll_uses_pch_ssc(dev_priv)) {
9521                 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9522                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9523         }
9524
9525         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9526                 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9527                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9528         }
9529
9530         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9531                 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9532                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9533         }
9534
9535         if (dev_priv->pch_ssc_use)
9536                 return;
9537
9538         if (has_fdi) {
9539                 lpt_bend_clkout_dp(dev_priv, 0);
9540                 lpt_enable_clkout_dp(dev_priv, true, true);
9541         } else {
9542                 lpt_disable_clkout_dp(dev_priv);
9543         }
9544 }
9545
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	/* IBX/CPT use the ironlake sequence; LPT has its own. */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
9556
/*
 * Program PIPECONF for an ILK-style pipe: bits-per-component, dithering,
 * interlacing, color range and gamma mode, all derived from crtc_state.
 */
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * Limited color range together with non-RGB output:
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	WARN_ON(crtc_state->limited_color_range &&
		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
9610
9611 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9612 {
9613         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9614         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9615         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9616         u32 val = 0;
9617
9618         if (IS_HASWELL(dev_priv) && crtc_state->dither)
9619                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9620
9621         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9622                 val |= PIPECONF_INTERLACED_ILK;
9623         else
9624                 val |= PIPECONF_PROGRESSIVE;
9625
9626         if (IS_HASWELL(dev_priv) &&
9627             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9628                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9629
9630         I915_WRITE(PIPECONF(cpu_transcoder), val);
9631         POSTING_READ(PIPECONF(cpu_transcoder));
9632 }
9633
/*
 * Program PIPEMISC (BDW+): dither bpc and enable, YUV output colorspace,
 * YCbCr 4:2:0 blend mode and (gen11+) the HDR precision mode bit.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * gen11+: only allow HDR precision mode when every active plane
	 * other than the cursor is in icl_hdr_plane_mask() (presumably
	 * the HDR-capable planes — confirm against plane definitions).
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	I915_WRITE(PIPEMISC(crtc->pipe), val);
}
9676
9677 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9678 {
9679         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9680         u32 tmp;
9681
9682         tmp = I915_READ(PIPEMISC(crtc->pipe));
9683
9684         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9685         case PIPEMISC_DITHER_6_BPC:
9686                 return 18;
9687         case PIPEMISC_DITHER_8_BPC:
9688                 return 24;
9689         case PIPEMISC_DITHER_10_BPC:
9690                 return 30;
9691         case PIPEMISC_DITHER_12_BPC:
9692                 return 36;
9693         default:
9694                 MISSING_CASE(tmp);
9695                 return 0;
9696         }
9697 }
9698
/*
 * How many FDI lanes are needed to carry target_clock at bpp over a
 * link running at link_bw? Pads the bandwidth by 5% to leave headroom
 * for spread spectrum (max center spread is 2.5%).
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int lane_bps = link_bw * 8;

	/* Round up: a partially used lane is still a whole lane. */
	return (bps + lane_bps - 1) / lane_bps;
}
9709
9710 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9711 {
9712         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9713 }
9714
/*
 * Compute the ILK-style DPLL and FP divider register values for the
 * given crtc state and stash them in crtc_state->dpll_hw_state. The
 * optional reduced_clock supplies alternate (downclocked) dividers for
 * FP1; when absent, FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same m < factor * n criterion as ironlake_needs_fb_cb_tune(). */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Pixel multiplier is stored as multiplier - 1 in the register. */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* LVDS may run off the SSC reference when the VBT enables it. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9816
/*
 * Compute PLL dividers for an ILK PCH-driven pipe and reserve a shared
 * DPLL for it. CPU eDP needs no PCH PLL and bails out early. Returns 0
 * on success, -EINVAL when no usable dividers or PLL can be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* Divider limits depend on the LVDS link config and refclk. */
		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Honour pre-set dividers, otherwise search for a valid combination. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
			      pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
9872
/*
 * Read back the link M/N and data M/N values (plus TU size) from the
 * PCH transcoder registers for this crtc's pipe.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* The TU size field holds size - 1 in the high bits of DATA_M1. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9888
/*
 * Read back link/data M/N (and TU size) from the CPU transcoder
 * registers; also fills m2_n2 when requested and the transcoder has a
 * second M/N set. Pre-gen5 parts use the per-pipe G4X registers instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size is stored as size - 1 in the high bits of DATA_M. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9925
9926 void intel_dp_get_m_n(struct intel_crtc *crtc,
9927                       struct intel_crtc_state *pipe_config)
9928 {
9929         if (pipe_config->has_pch_encoder)
9930                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9931         else
9932                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9933                                              &pipe_config->dp_m_n,
9934                                              &pipe_config->dp_m2_n2);
9935 }
9936
/* Read back the FDI link M/N configuration for hardware state readout. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9943
9944 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9945                                     struct intel_crtc_state *pipe_config)
9946 {
9947         struct drm_device *dev = crtc->base.dev;
9948         struct drm_i915_private *dev_priv = to_i915(dev);
9949         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9950         u32 ps_ctrl = 0;
9951         int id = -1;
9952         int i;
9953
9954         /* find scaler attached to this pipe */
9955         for (i = 0; i < crtc->num_scalers; i++) {
9956                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9957                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9958                         id = i;
9959                         pipe_config->pch_pfit.enabled = true;
9960                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9961                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9962                         scaler_state->scalers[i].in_use = true;
9963                         break;
9964                 }
9965         }
9966
9967         scaler_state->scaler_id = id;
9968         if (id >= 0) {
9969                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9970         } else {
9971                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9972         }
9973 }
9974
/*
 * Reconstruct an initial framebuffer description from the primary plane's
 * current hardware state on SKL+ (format, modifier, rotation, base, stride
 * and size), filling in @plane_config.  Allocates an intel_framebuffer that
 * the caller owns via plane_config->fb; bails out silently if the plane is
 * disabled or the allocation fails.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* nothing to read out if the plane is off */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* the pixel format field layout changed on gen11 */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* gen10+/GLK moved the alpha mode into PLANE_COLOR_CTL */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* map hw tiling bits to DRM framebuffer modifiers */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	/* horizontal flip bit exists from gen10 onwards */
	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* surface base address is 4k aligned; mask off the low bits */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE holds (height-1) << 16 | (width-1) */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* hw stride is in units that depend on format/modifier */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10101
/*
 * Read out the panel fitter state on ILK-IVB: if PF_CTL says the fitter is
 * enabled, record its window position and size in pipe_config->pch_pfit.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN(dev_priv, 7)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
10125
10126 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
10127                                      struct intel_crtc_state *pipe_config)
10128 {
10129         struct drm_device *dev = crtc->base.dev;
10130         struct drm_i915_private *dev_priv = to_i915(dev);
10131         enum intel_display_power_domain power_domain;
10132         intel_wakeref_t wakeref;
10133         u32 tmp;
10134         bool ret;
10135
10136         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10137         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10138         if (!wakeref)
10139                 return false;
10140
10141         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10142         pipe_config->shared_dpll = NULL;
10143         pipe_config->master_transcoder = INVALID_TRANSCODER;
10144
10145         ret = false;
10146         tmp = I915_READ(PIPECONF(crtc->pipe));
10147         if (!(tmp & PIPECONF_ENABLE))
10148                 goto out;
10149
10150         switch (tmp & PIPECONF_BPC_MASK) {
10151         case PIPECONF_6BPC:
10152                 pipe_config->pipe_bpp = 18;
10153                 break;
10154         case PIPECONF_8BPC:
10155                 pipe_config->pipe_bpp = 24;
10156                 break;
10157         case PIPECONF_10BPC:
10158                 pipe_config->pipe_bpp = 30;
10159                 break;
10160         case PIPECONF_12BPC:
10161                 pipe_config->pipe_bpp = 36;
10162                 break;
10163         default:
10164                 break;
10165         }
10166
10167         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10168                 pipe_config->limited_color_range = true;
10169
10170         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10171         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10172         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10173                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10174                 break;
10175         default:
10176                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10177                 break;
10178         }
10179
10180         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10181                 PIPECONF_GAMMA_MODE_SHIFT;
10182
10183         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10184
10185         i9xx_get_pipe_color_config(pipe_config);
10186         intel_color_get_config(pipe_config);
10187
10188         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10189                 struct intel_shared_dpll *pll;
10190                 enum intel_dpll_id pll_id;
10191
10192                 pipe_config->has_pch_encoder = true;
10193
10194                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10195                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10196                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10197
10198                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10199
10200                 if (HAS_PCH_IBX(dev_priv)) {
10201                         /*
10202                          * The pipe->pch transcoder and pch transcoder->pll
10203                          * mapping is fixed.
10204                          */
10205                         pll_id = (enum intel_dpll_id) crtc->pipe;
10206                 } else {
10207                         tmp = I915_READ(PCH_DPLL_SEL);
10208                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10209                                 pll_id = DPLL_ID_PCH_PLL_B;
10210                         else
10211                                 pll_id= DPLL_ID_PCH_PLL_A;
10212                 }
10213
10214                 pipe_config->shared_dpll =
10215                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10216                 pll = pipe_config->shared_dpll;
10217
10218                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10219                                                 &pipe_config->dpll_hw_state));
10220
10221                 tmp = pipe_config->dpll_hw_state.dpll;
10222                 pipe_config->pixel_multiplier =
10223                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10224                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10225
10226                 ironlake_pch_clock_get(crtc, pipe_config);
10227         } else {
10228                 pipe_config->pixel_multiplier = 1;
10229         }
10230
10231         intel_get_pipe_timings(crtc, pipe_config);
10232         intel_get_pipe_src_size(crtc, pipe_config);
10233
10234         ironlake_get_pfit_config(crtc, pipe_config);
10235
10236         ret = true;
10237
10238 out:
10239         intel_display_power_put(dev_priv, power_domain, wakeref);
10240
10241         return ret;
10242 }
10243 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10244                                       struct intel_crtc_state *crtc_state)
10245 {
10246         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10247         struct intel_atomic_state *state =
10248                 to_intel_atomic_state(crtc_state->uapi.state);
10249
10250         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10251             INTEL_GEN(dev_priv) >= 11) {
10252                 struct intel_encoder *encoder =
10253                         intel_get_crtc_new_encoder(state, crtc_state);
10254
10255                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10256                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10257                                       pipe_name(crtc->pipe));
10258                         return -EINVAL;
10259                 }
10260         }
10261
10262         return 0;
10263 }
10264
10265 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
10266                                    enum port port,
10267                                    struct intel_crtc_state *pipe_config)
10268 {
10269         enum intel_dpll_id id;
10270         u32 temp;
10271
10272         temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10273         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10274
10275         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10276                 return;
10277
10278         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10279 }
10280
/*
 * Determine which DPLL drives @port on ICL+ and record it in the port's
 * slot of pipe_config->icl_port_dplls, then mark it active.
 *
 * Combo PHY ports read the selection from ICL_DPCLKA_CFGCR0; Type-C ports
 * distinguish the MG PHY PLL from the TBT PLL via DDI_CLK_SEL.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		temp = I915_READ(ICL_DPCLKA_CFGCR0) &
			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* anything else must be one of the TBT clock selects */
			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		WARN(1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10317
10318 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10319                                 enum port port,
10320                                 struct intel_crtc_state *pipe_config)
10321 {
10322         enum intel_dpll_id id;
10323
10324         switch (port) {
10325         case PORT_A:
10326                 id = DPLL_ID_SKL_DPLL0;
10327                 break;
10328         case PORT_B:
10329                 id = DPLL_ID_SKL_DPLL1;
10330                 break;
10331         case PORT_C:
10332                 id = DPLL_ID_SKL_DPLL2;
10333                 break;
10334         default:
10335                 DRM_ERROR("Incorrect port type\n");
10336                 return;
10337         }
10338
10339         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10340 }
10341
/*
 * Look up which shared DPLL clocks @port on SKL/KBL by decoding the
 * per-port clock select field of DPLL_CTRL2, and record it in
 * @pipe_config->shared_dpll.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	/* open-coded field shift; presumably matches the CLK_SEL layout
	 * of DPLL_CTRL2 (3 bits per port, select at bit 1) — TODO confirm */
	id = temp >> (port * 3 + 1);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10357
/*
 * Translate the PORT_CLK_SEL value of @port on HSW/BDW into a shared DPLL
 * id and record the PLL in @pipe_config->shared_dpll.  PORT_CLK_SEL_NONE
 * (and, after a MISSING_CASE warning, any unknown value) leaves the state
 * untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10393
/*
 * Figure out which cpu transcoder drives @crtc on HSW+ (the fixed
 * pipe->transcoder mapping, unless an eDP or gen11+ DSI panel transcoder
 * is routed to this pipe), grab a power reference for that transcoder and
 * report whether the pipe is actually enabled.
 *
 * On success the acquired wakeref is stored in @wakerefs and its domain
 * bit is set in @power_domain_mask; the caller releases them.  Returns
 * false if the transcoder's power domain is off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER_EDP(dev_priv))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* decode which pipe the panel transcoder is feeding */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10489
/*
 * Check whether one of the BXT DSI transcoders (ports A/C) is driving
 * @crtc.  Any transcoder power reference taken while probing is recorded
 * in @wakerefs / @power_domain_mask for the caller to release.
 *
 * Returns true if pipe_config->cpu_transcoder ended up set to a DSI
 * transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* skip DSI ports routed to a different pipe */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10544
/*
 * Read out which DDI port (and hence which PLL) is driving the CRTC's
 * transcoder, plus the FDI/PCH state on HSW/BDW where applicable.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	/* the DDI select field layout changed on gen12 */
	if (INTEL_GEN(dev_priv) >= 12)
		port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	else
		port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);

	/* dispatch to the platform-specific port->PLL readout */
	if (INTEL_GEN(dev_priv) >= 11)
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A; it is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10593
10594 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
10595                                                  enum transcoder cpu_transcoder)
10596 {
10597         u32 trans_port_sync, master_select;
10598
10599         trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
10600
10601         if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
10602                 return INVALID_TRANSCODER;
10603
10604         master_select = trans_port_sync &
10605                         PORT_SYNC_MODE_MASTER_SELECT_MASK;
10606         if (master_select == 0)
10607                 return TRANSCODER_EDP;
10608         else
10609                 return master_select - 1;
10610 }
10611
/*
 * Read out the transcoder port sync configuration for @crtc_state:
 * which transcoder (if any) is our master, and which transcoders on
 * this device have us configured as their master (slave mask).
 *
 * A state may be a master or a slave, but not both; the final WARN_ON
 * checks that invariant.
 */
static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 transcoders;
	enum transcoder cpu_transcoder;

	crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
								  crtc_state->cpu_transcoder);

	transcoders = BIT(TRANSCODER_A) |
		BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) |
		BIT(TRANSCODER_D);
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t trans_wakeref;

		/* only probe transcoders whose power domain is up */
		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
								   power_domain);

		if (!trans_wakeref)
			continue;

		if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
		    crtc_state->cpu_transcoder)
			crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

		intel_display_power_put(dev_priv, power_domain, trans_wakeref);
	}

	WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
		crtc_state->sync_mode_slaves_mask);
}
10646
10647 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10648                                     struct intel_crtc_state *pipe_config)
10649 {
10650         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10651         intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10652         enum intel_display_power_domain power_domain;
10653         u64 power_domain_mask;
10654         bool active;
10655
10656         intel_crtc_init_scalers(crtc, pipe_config);
10657
10658         pipe_config->master_transcoder = INVALID_TRANSCODER;
10659
10660         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10661         wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10662         if (!wf)
10663                 return false;
10664
10665         wakerefs[power_domain] = wf;
10666         power_domain_mask = BIT_ULL(power_domain);
10667
10668         pipe_config->shared_dpll = NULL;
10669
10670         active = hsw_get_transcoder_state(crtc, pipe_config,
10671                                           &power_domain_mask, wakerefs);
10672
10673         if (IS_GEN9_LP(dev_priv) &&
10674             bxt_get_dsi_transcoder_state(crtc, pipe_config,
10675                                          &power_domain_mask, wakerefs)) {
10676                 WARN_ON(active);
10677                 active = true;
10678         }
10679
10680         if (!active)
10681                 goto out;
10682
10683         if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10684             INTEL_GEN(dev_priv) >= 11) {
10685                 haswell_get_ddi_port_state(crtc, pipe_config);
10686                 intel_get_pipe_timings(crtc, pipe_config);
10687         }
10688
10689         intel_get_pipe_src_size(crtc, pipe_config);
10690
10691         if (IS_HASWELL(dev_priv)) {
10692                 u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10693
10694                 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
10695                         pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10696                 else
10697                         pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10698         } else {
10699                 pipe_config->output_format =
10700                         bdw_get_pipemisc_output_format(crtc);
10701
10702                 /*
10703                  * Currently there is no interface defined to
10704                  * check user preference between RGB/YCBCR444
10705                  * or YCBCR420. So the only possible case for
10706                  * YCBCR444 usage is driving YCBCR420 output
10707                  * with LSPCON, when pipe is configured for
10708                  * YCBCR444 output and LSPCON takes care of
10709                  * downsampling it.
10710                  */
10711                 pipe_config->lspcon_downsampling =
10712                         pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
10713         }
10714
10715         pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10716
10717         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10718
10719         if (INTEL_GEN(dev_priv) >= 9) {
10720                 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10721
10722                 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10723                         pipe_config->gamma_enable = true;
10724
10725                 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10726                         pipe_config->csc_enable = true;
10727         } else {
10728                 i9xx_get_pipe_color_config(pipe_config);
10729         }
10730
10731         intel_color_get_config(pipe_config);
10732
10733         power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10734         WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10735
10736         wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10737         if (wf) {
10738                 wakerefs[power_domain] = wf;
10739                 power_domain_mask |= BIT_ULL(power_domain);
10740
10741                 if (INTEL_GEN(dev_priv) >= 9)
10742                         skylake_get_pfit_config(crtc, pipe_config);
10743                 else
10744                         ironlake_get_pfit_config(crtc, pipe_config);
10745         }
10746
10747         if (hsw_crtc_supports_ips(crtc)) {
10748                 if (IS_HASWELL(dev_priv))
10749                         pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10750                 else {
10751                         /*
10752                          * We cannot readout IPS state on broadwell, set to
10753                          * true so we can set it to a defined state on first
10754                          * commit.
10755                          */
10756                         pipe_config->ips_enabled = true;
10757                 }
10758         }
10759
10760         if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10761             !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10762                 pipe_config->pixel_multiplier =
10763                         I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10764         } else {
10765                 pipe_config->pixel_multiplier = 1;
10766         }
10767
10768         if (INTEL_GEN(dev_priv) >= 11 &&
10769             !transcoder_is_dsi(pipe_config->cpu_transcoder))
10770                 icelake_get_trans_port_sync_config(pipe_config);
10771
10772 out:
10773         for_each_power_domain(power_domain, power_domain_mask)
10774                 intel_display_power_put(dev_priv,
10775                                         power_domain, wakerefs[power_domain]);
10776
10777         return active;
10778 }
10779
10780 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10781 {
10782         struct drm_i915_private *dev_priv =
10783                 to_i915(plane_state->uapi.plane->dev);
10784         const struct drm_framebuffer *fb = plane_state->hw.fb;
10785         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10786         u32 base;
10787
10788         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10789                 base = obj->phys_handle->busaddr;
10790         else
10791                 base = intel_plane_ggtt_offset(plane_state);
10792
10793         return base + plane_state->color_plane[0].offset;
10794 }
10795
10796 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10797 {
10798         int x = plane_state->uapi.dst.x1;
10799         int y = plane_state->uapi.dst.y1;
10800         u32 pos = 0;
10801
10802         if (x < 0) {
10803                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10804                 x = -x;
10805         }
10806         pos |= x << CURSOR_X_SHIFT;
10807
10808         if (y < 0) {
10809                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10810                 y = -y;
10811         }
10812         pos |= y << CURSOR_Y_SHIFT;
10813
10814         return pos;
10815 }
10816
10817 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10818 {
10819         const struct drm_mode_config *config =
10820                 &plane_state->uapi.plane->dev->mode_config;
10821         int width = drm_rect_width(&plane_state->uapi.dst);
10822         int height = drm_rect_height(&plane_state->uapi.dst);
10823
10824         return width > 0 && width <= config->cursor_width &&
10825                 height > 0 && height <= config->cursor_height;
10826 }
10827
/*
 * Compute and validate the plane 0 surface offset for a cursor plane.
 *
 * Derives the aligned surface offset from the source origin and rejects
 * any configuration that would leave a residual x/y offset, since the
 * cursor hardware cannot pan within the fb. Returns 0 on success or a
 * negative errno.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing more to compute for an invisible cursor. */
	if (!plane_state->uapi.visible)
		return 0;

	/* src coordinates are 16.16 fixed point; take the integer part. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	/* Any leftover x/y would require panning, which cursors can't do. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Point at the last pixel so the 180° scanout runs backwards. */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
10878
10879 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10880                               struct intel_plane_state *plane_state)
10881 {
10882         const struct drm_framebuffer *fb = plane_state->hw.fb;
10883         int ret;
10884
10885         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10886                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10887                 return -EINVAL;
10888         }
10889
10890         ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
10891                                                   &crtc_state->uapi,
10892                                                   DRM_PLANE_HELPER_NO_SCALING,
10893                                                   DRM_PLANE_HELPER_NO_SCALING,
10894                                                   true, true);
10895         if (ret)
10896                 return ret;
10897
10898         /* Use the unclipped src/dst rectangles, which we program to hw */
10899         plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
10900         plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
10901
10902         ret = intel_cursor_check_surface(plane_state);
10903         if (ret)
10904                 return ret;
10905
10906         if (!plane_state->uapi.visible)
10907                 return 0;
10908
10909         ret = intel_plane_check_src_coordinates(plane_state);
10910         if (ret)
10911                 return ret;
10912
10913         return 0;
10914 }
10915
10916 static unsigned int
10917 i845_cursor_max_stride(struct intel_plane *plane,
10918                        u32 pixel_format, u64 modifier,
10919                        unsigned int rotation)
10920 {
10921         return 2048;
10922 }
10923
10924 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10925 {
10926         u32 cntl = 0;
10927
10928         if (crtc_state->gamma_enable)
10929                 cntl |= CURSOR_GAMMA_ENABLE;
10930
10931         return cntl;
10932 }
10933
10934 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10935                            const struct intel_plane_state *plane_state)
10936 {
10937         return CURSOR_ENABLE |
10938                 CURSOR_FORMAT_ARGB |
10939                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10940 }
10941
10942 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10943 {
10944         int width = drm_rect_width(&plane_state->uapi.dst);
10945
10946         /*
10947          * 845g/865g are only limited by the width of their cursors,
10948          * the height is arbitrary up to the precision of the register.
10949          */
10950         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10951 }
10952
10953 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10954                              struct intel_plane_state *plane_state)
10955 {
10956         const struct drm_framebuffer *fb = plane_state->hw.fb;
10957         int ret;
10958
10959         ret = intel_check_cursor(crtc_state, plane_state);
10960         if (ret)
10961                 return ret;
10962
10963         /* if we want to turn off the cursor ignore width and height */
10964         if (!fb)
10965                 return 0;
10966
10967         /* Check for which cursor types we support */
10968         if (!i845_cursor_size_ok(plane_state)) {
10969                 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10970                           drm_rect_width(&plane_state->uapi.dst),
10971                           drm_rect_height(&plane_state->uapi.dst));
10972                 return -EINVAL;
10973         }
10974
10975         WARN_ON(plane_state->uapi.visible &&
10976                 plane_state->color_plane[0].stride != fb->pitches[0]);
10977
10978         switch (fb->pitches[0]) {
10979         case 256:
10980         case 512:
10981         case 1024:
10982         case 2048:
10983                 break;
10984         default:
10985                 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10986                               fb->pitches[0]);
10987                 return -EINVAL;
10988         }
10989
10990         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10991
10992         return 0;
10993 }
10994
/*
 * Program the 845g/865g cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL or not visible. Must preserve
 * the documented disable-before-reprogram sequence below.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		/* CURSIZE packs height into bits 12+ and width into the low bits. */
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable, rewrite everything, then re-enable via CURCNTR. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; a CURPOS write suffices. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11039
11040 static void i845_disable_cursor(struct intel_plane *plane,
11041                                 const struct intel_crtc_state *crtc_state)
11042 {
11043         i845_update_cursor(plane, crtc_state, NULL);
11044 }
11045
11046 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11047                                      enum pipe *pipe)
11048 {
11049         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11050         enum intel_display_power_domain power_domain;
11051         intel_wakeref_t wakeref;
11052         bool ret;
11053
11054         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11055         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11056         if (!wakeref)
11057                 return false;
11058
11059         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11060
11061         *pipe = PIPE_A;
11062
11063         intel_display_power_put(dev_priv, power_domain, wakeref);
11064
11065         return ret;
11066 }
11067
11068 static unsigned int
11069 i9xx_cursor_max_stride(struct intel_plane *plane,
11070                        u32 pixel_format, u64 modifier,
11071                        unsigned int rotation)
11072 {
11073         return plane->base.dev->mode_config.cursor_width * 4;
11074 }
11075
11076 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11077 {
11078         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11079         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11080         u32 cntl = 0;
11081
11082         if (INTEL_GEN(dev_priv) >= 11)
11083                 return cntl;
11084
11085         if (crtc_state->gamma_enable)
11086                 cntl = MCURSOR_GAMMA_ENABLE;
11087
11088         if (crtc_state->csc_enable)
11089                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11090
11091         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11092                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11093
11094         return cntl;
11095 }
11096
11097 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11098                            const struct intel_plane_state *plane_state)
11099 {
11100         struct drm_i915_private *dev_priv =
11101                 to_i915(plane_state->uapi.plane->dev);
11102         u32 cntl = 0;
11103
11104         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11105                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11106
11107         switch (drm_rect_width(&plane_state->uapi.dst)) {
11108         case 64:
11109                 cntl |= MCURSOR_MODE_64_ARGB_AX;
11110                 break;
11111         case 128:
11112                 cntl |= MCURSOR_MODE_128_ARGB_AX;
11113                 break;
11114         case 256:
11115                 cntl |= MCURSOR_MODE_256_ARGB_AX;
11116                 break;
11117         default:
11118                 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11119                 return 0;
11120         }
11121
11122         if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11123                 cntl |= MCURSOR_ROTATE_180;
11124
11125         return cntl;
11126 }
11127
11128 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11129 {
11130         struct drm_i915_private *dev_priv =
11131                 to_i915(plane_state->uapi.plane->dev);
11132         int width = drm_rect_width(&plane_state->uapi.dst);
11133         int height = drm_rect_height(&plane_state->uapi.dst);
11134
11135         if (!intel_cursor_size_ok(plane_state))
11136                 return false;
11137
11138         /* Cursor width is limited to a few power-of-two sizes */
11139         switch (width) {
11140         case 256:
11141         case 128:
11142         case 64:
11143                 break;
11144         default:
11145                 return false;
11146         }
11147
11148         /*
11149          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11150          * height from 8 lines up to the cursor width, when the
11151          * cursor is not rotated. Everything else requires square
11152          * cursors.
11153          */
11154         if (HAS_CUR_FBC(dev_priv) &&
11155             plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11156                 if (height < 8 || height > width)
11157                         return false;
11158         } else {
11159                 if (height != width)
11160                         return false;
11161         }
11162
11163         return true;
11164 }
11165
/*
 * i9xx+ specific atomic check for the cursor plane: run the common
 * cursor checks, enforce this hardware's size/stride limits, reject
 * the broken CHV pipe C edge-straddling case, and precompute the
 * control register value. Returns 0 or a negative errno.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  drm_rect_width(&plane_state->uapi.dst),
			  drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The fb must be exactly cursor-width * cpp bytes per line. */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0],
			      drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
11222
/*
 * Program the i9xx+ cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL or not visible. The register
 * write order is hardware-critical; see the arming comment below.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		/* Fast path: only the position changed. */
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11290
11291 static void i9xx_disable_cursor(struct intel_plane *plane,
11292                                 const struct intel_crtc_state *crtc_state)
11293 {
11294         i9xx_update_cursor(plane, crtc_state, NULL);
11295 }
11296
11297 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11298                                      enum pipe *pipe)
11299 {
11300         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11301         enum intel_display_power_domain power_domain;
11302         intel_wakeref_t wakeref;
11303         bool ret;
11304         u32 val;
11305
11306         /*
11307          * Not 100% correct for planes that can move between pipes,
11308          * but that's only the case for gen2-3 which don't have any
11309          * display power wells.
11310          */
11311         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11312         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11313         if (!wakeref)
11314                 return false;
11315
11316         val = I915_READ(CURCNTR(plane->pipe));
11317
11318         ret = val & MCURSOR_MODE;
11319
11320         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11321                 *pipe = plane->pipe;
11322         else
11323                 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11324                         MCURSOR_PIPE_SELECT_SHIFT;
11325
11326         intel_display_power_put(dev_priv, power_domain, wakeref);
11327
11328         return ret;
11329 }
11330
/*
 * VESA 640x480x72Hz mode to set on the pipe while performing load
 * detection (see intel_get_load_detect_pipe()).
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11336
11337 struct drm_framebuffer *
11338 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11339                          struct drm_mode_fb_cmd2 *mode_cmd)
11340 {
11341         struct intel_framebuffer *intel_fb;
11342         int ret;
11343
11344         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11345         if (!intel_fb)
11346                 return ERR_PTR(-ENOMEM);
11347
11348         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11349         if (ret)
11350                 goto err;
11351
11352         return &intel_fb->base;
11353
11354 err:
11355         kfree(intel_fb);
11356         return ERR_PTR(ret);
11357 }
11358
11359 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11360                                         struct drm_crtc *crtc)
11361 {
11362         struct drm_plane *plane;
11363         struct drm_plane_state *plane_state;
11364         int ret, i;
11365
11366         ret = drm_atomic_add_affected_planes(state, crtc);
11367         if (ret)
11368                 return ret;
11369
11370         for_each_new_plane_in_state(state, plane, plane_state, i) {
11371                 if (plane_state->crtc != crtc)
11372                         continue;
11373
11374                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11375                 if (ret)
11376                         return ret;
11377
11378                 drm_atomic_set_fb_for_plane(plane_state, NULL);
11379         }
11380
11381         return 0;
11382 }
11383
/*
 * Take over a CRTC and drive @connector with a fixed VESA mode so the
 * encoder can perform load detection. On success, a duplicated state
 * to undo the takeover is stored in @old->restore_state for
 * intel_release_load_detect_pipe().
 *
 * NOTE(review): declared int but returns true/false on the normal
 * paths and -EDEADLK only for modeset-lock backoff — callers appear
 * to treat the result as "nonzero means detect, retry on -EDEADLK";
 * confirm against callers before changing.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* An enabled crtc is in use; drop its lock and move on. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* Build the takeover state and a snapshot to restore afterwards. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Scan out with no planes while we own the pipe. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* Propagate deadlock so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
11538
11539 void intel_release_load_detect_pipe(struct drm_connector *connector,
11540                                     struct intel_load_detect_pipe *old,
11541                                     struct drm_modeset_acquire_ctx *ctx)
11542 {
11543         struct intel_encoder *intel_encoder =
11544                 intel_attached_encoder(connector);
11545         struct drm_encoder *encoder = &intel_encoder->base;
11546         struct drm_atomic_state *state = old->restore_state;
11547         int ret;
11548
11549         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11550                       connector->base.id, connector->name,
11551                       encoder->base.id, encoder->name);
11552
11553         if (!state)
11554                 return;
11555
11556         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11557         if (ret)
11558                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11559         drm_atomic_state_put(state);
11560 }
11561
11562 static int i9xx_pll_refclk(struct drm_device *dev,
11563                            const struct intel_crtc_state *pipe_config)
11564 {
11565         struct drm_i915_private *dev_priv = to_i915(dev);
11566         u32 dpll = pipe_config->dpll_hw_state.dpll;
11567
11568         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11569                 return dev_priv->vbt.lvds_ssc_freq;
11570         else if (HAS_PCH_SPLIT(dev_priv))
11571                 return 120000;
11572         else if (!IS_GEN(dev_priv, 2))
11573                 return 96000;
11574         else
11575                 return 48000;
11576 }
11577
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FP0/FP1 hold alternate divisor sets; the DPLL selects which one. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M1/N/M2 dividers; Pineview uses different field layouts. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* Gen3+: P1 is a one-hot bitfield, hence the ffs() decode. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs. LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/*
		 * Gen2: the P1/P2 encoding differs depending on whether the
		 * pipe drives LVDS. I830 has no LVDS register at all.
		 */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
11667
11668 int intel_dotclock_calculate(int link_freq,
11669                              const struct intel_link_m_n *m_n)
11670 {
11671         /*
11672          * The calculation for the data clock is:
11673          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11674          * But we want to avoid losing precison if possible, so:
11675          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11676          *
11677          * and the link clock is simpler:
11678          * link_clock = (m * link_clock) / n
11679          */
11680
11681         if (!m_n->link_n)
11682                 return 0;
11683
11684         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11685 }
11686
11687 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11688                                    struct intel_crtc_state *pipe_config)
11689 {
11690         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11691
11692         /* read out port_clock from the DPLL */
11693         i9xx_crtc_clock_get(crtc, pipe_config);
11694
11695         /*
11696          * In case there is an active pipe without active ports,
11697          * we may need some idea for the dotclock anyway.
11698          * Calculate one based on the FDI configuration.
11699          */
11700         pipe_config->hw.adjusted_mode.crtc_clock =
11701                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11702                                          &pipe_config->fdi_m_n);
11703 }
11704
11705 /* Returns the currently programmed mode of the given encoder. */
11706 struct drm_display_mode *
11707 intel_encoder_current_mode(struct intel_encoder *encoder)
11708 {
11709         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11710         struct intel_crtc_state *crtc_state;
11711         struct drm_display_mode *mode;
11712         struct intel_crtc *crtc;
11713         enum pipe pipe;
11714
11715         if (!encoder->get_hw_state(encoder, &pipe))
11716                 return NULL;
11717
11718         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11719
11720         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11721         if (!mode)
11722                 return NULL;
11723
11724         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11725         if (!crtc_state) {
11726                 kfree(mode);
11727                 return NULL;
11728         }
11729
11730         crtc_state->uapi.crtc = &crtc->base;
11731
11732         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11733                 kfree(crtc_state);
11734                 kfree(mode);
11735                 return NULL;
11736         }
11737
11738         encoder->get_config(encoder, crtc_state);
11739
11740         intel_mode_from_pipe_config(mode, crtc_state);
11741
11742         kfree(crtc_state);
11743
11744         return mode;
11745 }
11746
/* Release the DRM core crtc state, then free the embedding intel_crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(to_intel_crtc(crtc));
}
11754
11755 /**
11756  * intel_wm_need_update - Check whether watermarks need updating
11757  * @cur: current plane state
11758  * @new: new plane state
11759  *
11760  * Check current plane state versus the new one to determine whether
11761  * watermarks need to be recalculated.
11762  *
11763  * Returns true or false.
11764  */
11765 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11766                                  struct intel_plane_state *new)
11767 {
11768         /* Update watermarks on tiling or size changes. */
11769         if (new->uapi.visible != cur->uapi.visible)
11770                 return true;
11771
11772         if (!cur->hw.fb || !new->hw.fb)
11773                 return false;
11774
11775         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11776             cur->hw.rotation != new->hw.rotation ||
11777             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11778             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11779             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11780             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
11781                 return true;
11782
11783         return false;
11784 }
11785
11786 static bool needs_scaling(const struct intel_plane_state *state)
11787 {
11788         int src_w = drm_rect_width(&state->uapi.src) >> 16;
11789         int src_h = drm_rect_height(&state->uapi.src) >> 16;
11790         int dst_w = drm_rect_width(&state->uapi.dst);
11791         int dst_h = drm_rect_height(&state->uapi.dst);
11792
11793         return (src_w != dst_w || src_h != dst_h);
11794 }
11795
/*
 * Compute the derived per-crtc state (active plane mask, watermark update
 * flags, cxsr/LP watermark workarounds, frontbuffer bits) implied by a
 * plane's transition from @old_plane_state to @plane_state.
 *
 * Also mutates @plane_state visibility when the crtc is being disabled.
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* skl+: non-cursor planes may need a hardware scaler (re)assigned. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane can't have been visible on an inactive crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->uapi.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
		crtc_state->min_cdclk[plane->id] = 0;
	}

	/* Invisible before and after: nothing else to derive. */
	if (!was_visible && !visible)
		return 0;

	/* A full modeset counts as both a turn-off and a turn-on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 crtc->base.base.id, crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
11918
11919 static bool encoders_cloneable(const struct intel_encoder *a,
11920                                const struct intel_encoder *b)
11921 {
11922         /* masks could be asymmetric, so check both ways */
11923         return a == b || (a->cloneable & (1 << b->type) &&
11924                           b->cloneable & (1 << a->type));
11925 }
11926
11927 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11928                                          struct intel_crtc *crtc,
11929                                          struct intel_encoder *encoder)
11930 {
11931         struct intel_encoder *source_encoder;
11932         struct drm_connector *connector;
11933         struct drm_connector_state *connector_state;
11934         int i;
11935
11936         for_each_new_connector_in_state(state, connector, connector_state, i) {
11937                 if (connector_state->crtc != &crtc->base)
11938                         continue;
11939
11940                 source_encoder =
11941                         to_intel_encoder(connector_state->best_encoder);
11942                 if (!encoders_cloneable(encoder, source_encoder))
11943                         return false;
11944         }
11945
11946         return true;
11947 }
11948
11949 static int icl_add_linked_planes(struct intel_atomic_state *state)
11950 {
11951         struct intel_plane *plane, *linked;
11952         struct intel_plane_state *plane_state, *linked_plane_state;
11953         int i;
11954
11955         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11956                 linked = plane_state->planar_linked_plane;
11957
11958                 if (!linked)
11959                         continue;
11960
11961                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11962                 if (IS_ERR(linked_plane_state))
11963                         return PTR_ERR(linked_plane_state);
11964
11965                 WARN_ON(linked_plane_state->planar_linked_plane != plane);
11966                 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
11967         }
11968
11969         return 0;
11970 }
11971
/*
 * Gen11+ planar (NV12-style) YUV: each UV master plane needs a spare
 * hardware Y plane as its slave. Tear down all stale links on this crtc,
 * then pick a free Y-capable plane for every plane in nv12_planes.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or a
 * negative error code from pulling a plane state into the atomic state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			/* A slave with no visible master no longer scans out. */
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No planar formats on this crtc: unlinking above was enough. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this crtc to act as slave. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->color_plane[0] = plane_state->color_plane[0];

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/* HDR planes select their chroma upsampler partner here. */
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12061
12062 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12063 {
12064         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12065         struct intel_atomic_state *state =
12066                 to_intel_atomic_state(new_crtc_state->uapi.state);
12067         const struct intel_crtc_state *old_crtc_state =
12068                 intel_atomic_get_old_crtc_state(state, crtc);
12069
12070         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12071 }
12072
/*
 * For gen11+ tiled displays, link a slave crtc (any tile other than the
 * last horizontal/vertical one) to its master/genlock crtc by recording
 * the master transcoder in @crtc_state and adding this crtc's transcoder
 * to the master's sync_mode_slaves_mask.
 *
 * Returns 0 on success (including when this crtc needs no linking),
 * -EINVAL when no master crtc can be found, or a negative error code
 * from pulling connector/crtc states into the atomic state.
 */
static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_connector *master_connector, *connector;
	struct drm_connector_state *connector_state;
	struct drm_connector_list_iter conn_iter;
	/*
	 * NOTE(review): master_crtc persists across loop iterations; if a
	 * later connector's search found no master, a stale value from a
	 * previous iteration would be reused — confirm only one tiled
	 * connector can be bound to a crtc here.
	 */
	struct drm_crtc *master_crtc = NULL;
	struct drm_crtc_state *master_crtc_state;
	struct intel_crtc_state *master_pipe_config;
	int i, tile_group_id;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * In case of tiled displays there could be one or more slaves but there is
	 * only one master. Lets make the CRTC used by the connector corresponding
	 * to the last horizonal and last vertical tile a master/genlock CRTC.
	 * All the other CRTCs corresponding to other tiles of the same Tile group
	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;
		if (!connector->has_tile)
			continue;
		/* Mode doesn't span exactly one tile: no genlock needed. */
		if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
		    crtc_state->hw.mode.vdisplay != connector->tile_v_size)
			return 0;
		/* The last tile's crtc is the master itself, not a slave. */
		if (connector->tile_h_loc == connector->num_h_tile - 1 &&
		    connector->tile_v_loc == connector->num_v_tile - 1)
			continue;
		crtc_state->sync_mode_slaves_mask = 0;
		tile_group_id = connector->tile_group->id;
		/* Walk all connectors to find this tile group's master. */
		drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
		drm_for_each_connector_iter(master_connector, &conn_iter) {
			struct drm_connector_state *master_conn_state = NULL;

			if (!master_connector->has_tile)
				continue;
			if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
			    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
				continue;
			if (master_connector->tile_group->id != tile_group_id)
				continue;

			master_conn_state = drm_atomic_get_connector_state(&state->base,
									   master_connector);
			if (IS_ERR(master_conn_state)) {
				drm_connector_list_iter_end(&conn_iter);
				return PTR_ERR(master_conn_state);
			}
			if (master_conn_state->crtc) {
				master_crtc = master_conn_state->crtc;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!master_crtc) {
			DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
				      connector_state->crtc->base.id);
			return -EINVAL;
		}

		master_crtc_state = drm_atomic_get_crtc_state(&state->base,
							      master_crtc);
		if (IS_ERR(master_crtc_state))
			return PTR_ERR(master_crtc_state);

		/* Record the master transcoder and register as its slave. */
		master_pipe_config = to_intel_crtc_state(master_crtc_state);
		crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
		master_pipe_config->sync_mode_slaves_mask |=
			BIT(crtc_state->cpu_transcoder);
		DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
			      transcoder_name(crtc_state->master_transcoder),
			      crtc_state->uapi.crtc->base.id,
			      master_pipe_config->sync_mode_slaves_mask);
	}

	return 0;
}
12157
/*
 * Per-crtc atomic check: recompute clocks, color management state,
 * watermarks, scalers and IPS for @crtc from its new state in @state.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* Pre-gen5 (except g4x): disabling a pipe needs a post watermark update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* Compute a new clock config unless a shared DPLL was already picked. */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* skl+: refresh pipe scaler state and assign scaler hardware. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe)
			ret = skl_update_scaler_crtc(crtc_state);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, crtc,
							 crtc_state);
	}

	if (HAS_IPS(dev_priv))
		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);

	return ret;
}
12231
/*
 * Resynchronize every connector's atomic state (best_encoder/crtc links and
 * the reference held through them) with the connector's current encoder.
 * Used to make the atomic state match reality after a non-atomic update.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference the stale state held via its crtc link. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* New crtc link -> take a fresh reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
12256
12257 static int
12258 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12259                       struct intel_crtc_state *pipe_config)
12260 {
12261         struct drm_connector *connector = conn_state->connector;
12262         const struct drm_display_info *info = &connector->display_info;
12263         int bpp;
12264
12265         switch (conn_state->max_bpc) {
12266         case 6 ... 7:
12267                 bpp = 6 * 3;
12268                 break;
12269         case 8 ... 9:
12270                 bpp = 8 * 3;
12271                 break;
12272         case 10 ... 11:
12273                 bpp = 10 * 3;
12274                 break;
12275         case 12:
12276                 bpp = 12 * 3;
12277                 break;
12278         default:
12279                 return -EINVAL;
12280         }
12281
12282         if (bpp < pipe_config->pipe_bpp) {
12283                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12284                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12285                               connector->base.id, connector->name,
12286                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
12287                               pipe_config->pipe_bpp);
12288
12289                 pipe_config->pipe_bpp = bpp;
12290         }
12291
12292         return 0;
12293 }
12294
12295 static int
12296 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12297                           struct intel_crtc_state *pipe_config)
12298 {
12299         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12300         struct drm_atomic_state *state = pipe_config->uapi.state;
12301         struct drm_connector *connector;
12302         struct drm_connector_state *connector_state;
12303         int bpp, i;
12304
12305         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12306             IS_CHERRYVIEW(dev_priv)))
12307                 bpp = 10*3;
12308         else if (INTEL_GEN(dev_priv) >= 5)
12309                 bpp = 12*3;
12310         else
12311                 bpp = 8*3;
12312
12313         pipe_config->pipe_bpp = bpp;
12314
12315         /* Clamp display bpp to connector max bpp */
12316         for_each_new_connector_in_state(state, connector, connector_state, i) {
12317                 int ret;
12318
12319                 if (connector_state->crtc != &crtc->base)
12320                         continue;
12321
12322                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12323                 if (ret)
12324                         return ret;
12325         }
12326
12327         return 0;
12328 }
12329
/* Log the fixed-up crtc_* timing fields of @mode at KMS debug level. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
12341
/*
 * Log one link M/N configuration, tagged with @id (e.g. "fdi", "dp m_n").
 * NOTE(review): @pipe_config is currently unused in the body; it is kept
 * in the signature, presumably for call-site uniformity — confirm before
 * removing.
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
12352
/* Dump an HDMI infoframe at KERN_DEBUG, but only if KMS debugging is on. */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	/* Skip the (relatively expensive) log call unless DRM_UT_KMS is set. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
12362
/* Expands to a designated initializer mapping INTEL_OUTPUT_x -> "x". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for each INTEL_OUTPUT_* value, indexed by that value. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12381
12382 static void snprintf_output_types(char *buf, size_t len,
12383                                   unsigned int output_types)
12384 {
12385         char *str = buf;
12386         int i;
12387
12388         str[0] = '\0';
12389
12390         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12391                 int r;
12392
12393                 if ((output_types & BIT(i)) == 0)
12394                         continue;
12395
12396                 r = snprintf(str, len, "%s%s",
12397                              str != buf ? "," : "", output_type_str[i]);
12398                 if (r >= len)
12399                         break;
12400                 str += r;
12401                 len -= r;
12402
12403                 output_types &= ~BIT(i);
12404         }
12405
12406         WARN_ON_ONCE(output_types != 0);
12407 }
12408
/* Printable names for enum intel_output_format, indexed by format value. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12415
12416 static const char *output_formats(enum intel_output_format format)
12417 {
12418         if (format >= ARRAY_SIZE(output_format_str))
12419                 format = INTEL_OUTPUT_FORMAT_INVALID;
12420         return output_format_str[format];
12421 }
12422
/*
 * Log one plane's state: framebuffer (or [NOFB]), visibility, rotation,
 * scaler assignment, and — when visible — source and destination rects.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* No fb attached: nothing more than visibility to report. */
	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->uapi.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->uapi.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
			      DRM_RECT_ARG(&plane_state->uapi.dst));
}
12448
/*
 * Dump a crtc state to the KMS debug log: outputs, transcoder, link M/N
 * values, infoframes, modes/timings, scalers, panel fitter, DPLL and color
 * management state, followed by all planes of this crtc found in @state.
 * @context tags the dump (e.g. which caller produced it). For a disabled
 * crtc only the enable line and the planes are printed. @state may be NULL,
 * in which case the plane dump is skipped.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->hw.enable), context);

	/* Disabled crtc: the rest of the state is not meaningful. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->hw.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* Second set of M/N values only exists with DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump each infoframe type that is flagged as enabled. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	/* Pipe scalers exist on gen9+ only. */
	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have the gmch pfit, everything else the pch pfit. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has the CGM unit; other platforms report csc_mode instead. */
	if (IS_CHERRYVIEW(dev_priv))
		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->cgm_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->csc_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
12559
12560 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
12561 {
12562         struct drm_device *dev = state->base.dev;
12563         struct drm_connector *connector;
12564         struct drm_connector_list_iter conn_iter;
12565         unsigned int used_ports = 0;
12566         unsigned int used_mst_ports = 0;
12567         bool ret = true;
12568
12569         /*
12570          * We're going to peek into connector->state,
12571          * hence connection_mutex must be held.
12572          */
12573         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
12574
12575         /*
12576          * Walk the connector list instead of the encoder
12577          * list to detect the problem on ddi platforms
12578          * where there's just one encoder per digital port.
12579          */
12580         drm_connector_list_iter_begin(dev, &conn_iter);
12581         drm_for_each_connector_iter(connector, &conn_iter) {
12582                 struct drm_connector_state *connector_state;
12583                 struct intel_encoder *encoder;
12584
12585                 connector_state =
12586                         drm_atomic_get_new_connector_state(&state->base,
12587                                                            connector);
12588                 if (!connector_state)
12589                         connector_state = connector->state;
12590
12591                 if (!connector_state->best_encoder)
12592                         continue;
12593
12594                 encoder = to_intel_encoder(connector_state->best_encoder);
12595
12596                 WARN_ON(!connector_state->crtc);
12597
12598                 switch (encoder->type) {
12599                         unsigned int port_mask;
12600                 case INTEL_OUTPUT_DDI:
12601                         if (WARN_ON(!HAS_DDI(to_i915(dev))))
12602                                 break;
12603                         /* else, fall through */
12604                 case INTEL_OUTPUT_DP:
12605                 case INTEL_OUTPUT_HDMI:
12606                 case INTEL_OUTPUT_EDP:
12607                         port_mask = 1 << encoder->port;
12608
12609                         /* the same port mustn't appear more than once */
12610                         if (used_ports & port_mask)
12611                                 ret = false;
12612
12613                         used_ports |= port_mask;
12614                         break;
12615                 case INTEL_OUTPUT_DP_MST:
12616                         used_mst_ports |=
12617                                 1 << encoder->port;
12618                         break;
12619                 default:
12620                         break;
12621                 }
12622         }
12623         drm_connector_list_iter_end(&conn_iter);
12624
12625         /* can't mix MST and SST/HDMI on the same port */
12626         if (used_ports & used_mst_ports)
12627                 return false;
12628
12629         return ret;
12630 }
12631
/*
 * Refresh the hw state pieces that must track the uapi state even when no
 * full modeset happens; currently delegates to intel_crtc_copy_color_blobs().
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
12637
/*
 * Copy the userspace-visible (uapi) crtc state into the hw state:
 * enable/active flags and both modes, plus everything the nomodeset
 * variant also copies.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
12647
/*
 * Mirror the hw crtc state back into the uapi state so userspace sees the
 * values actually in use: enable/active, both modes and the color blobs.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Setting the mode also updates the uapi mode blob; must not fail. */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
12664
/*
 * Reset @crtc_state to a mostly-zeroed state ahead of recomputation, while
 * preserving the fields that must survive (uapi state, scaler/dpll state,
 * CRC enable, GMCH watermarks, trans port sync slave mask). Returns 0 or
 * -ENOMEM if the temporary copy cannot be allocated.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc_state *saved_state;

	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* GMCH platforms keep their computed watermarks across the clear. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call.
	 */
	if (is_trans_port_sync_master(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	/* Overwrite in place: everything not saved above becomes zero. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
12709
/*
 * Compute the full pipe configuration for a modeset: sanitize sync flags,
 * establish the baseline bpp and pipe source size, gather output types,
 * then let every encoder and the crtc adjust the config. An encoder may
 * request one retry (RETRY from intel_crtc_compute_config) when bandwidth
 * constrained. Returns 0, -EDEADLK for lock back-off, or a negative error.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;

	/* Default mapping: transcoder number == pipe number. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the pre-clamp bpp so the final debug print can show it. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Set the crtc_state defaults for trans_port_sync */
	pipe_config->master_transcoder = INVALID_TRANSCODER;
	ret = icl_add_sync_mode_crtcs(pipe_config);
	if (ret) {
		DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
			      ret);
		return ret;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK just means "retry the locking", not a failure. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* Exactly one retry is allowed; a second RETRY means a config loop. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
12857
/*
 * Fuzzy clock comparison: two nonzero clocks match when their relative
 * difference is below roughly 10% (5% of the sum of both clocks).
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	/* A zero clock never fuzzily matches a nonzero one. */
	if (!clock1 || !clock2)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	return ((delta + sum) * 100) / sum < 105;
}
12875
12876 static bool
12877 intel_compare_m_n(unsigned int m, unsigned int n,
12878                   unsigned int m2, unsigned int n2,
12879                   bool exact)
12880 {
12881         if (m == m2 && n == n2)
12882                 return true;
12883
12884         if (exact || !m || !n || !m2 || !n2)
12885                 return false;
12886
12887         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12888
12889         if (n > n2) {
12890                 while (n > n2) {
12891                         m2 <<= 1;
12892                         n2 <<= 1;
12893                 }
12894         } else if (n < n2) {
12895                 while (n < n2) {
12896                         m <<= 1;
12897                         n <<= 1;
12898                 }
12899         }
12900
12901         if (n != n2)
12902                 return false;
12903
12904         return intel_fuzzy_clock_check(m, m2);
12905 }
12906
12907 static bool
12908 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12909                        const struct intel_link_m_n *m2_n2,
12910                        bool exact)
12911 {
12912         return m_n->tu == m2_n2->tu &&
12913                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12914                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12915                 intel_compare_m_n(m_n->link_m, m_n->link_n,
12916                                   m2_n2->link_m, m2_n2->link_n, exact);
12917 }
12918
/* Byte-wise equality check of two HDMI infoframes. */
static bool
intel_compare_infoframe(const union hdmi_infoframe *a,
			const union hdmi_infoframe *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}
12925
/*
 * Report an infoframe mismatch. During a fastset this is only a debug
 * message (gated on DRM_UT_KMS); otherwise it is a full error dump of the
 * expected (@a) and found (@b) frames.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		/* Avoid building the dump unless KMS debugging is enabled. */
		if ((drm_debug & DRM_UT_KMS) == 0)
			return;

		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
		DRM_DEBUG_KMS("expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		DRM_DEBUG_KMS("found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		DRM_ERROR("mismatch in %s infoframe\n", name);
		DRM_ERROR("expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		DRM_ERROR("found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
12949
/*
 * Report a pipe config field mismatch for @name with a printf-style detail
 * message: debug level during a fastset, error level otherwise.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			      crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
			  crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
12970
12971 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12972 {
12973         if (i915_modparams.fastboot != -1)
12974                 return i915_modparams.fastboot;
12975
12976         /* Enable fastboot by default on Skylake and newer */
12977         if (INTEL_GEN(dev_priv) >= 9)
12978                 return true;
12979
12980         /* Enable fastboot by default on VLV and CHV */
12981         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12982                 return true;
12983
12984         /* Disabled by default on all others */
12985         return false;
12986 }
12987
12988 static bool
12989 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
12990                           const struct intel_crtc_state *pipe_config,
12991                           bool fastset)
12992 {
12993         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
12994         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
12995         bool ret = true;
12996         u32 bp_gamma = 0;
12997         bool fixup_inherited = fastset &&
12998                 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12999                 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
13000
13001         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
13002                 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
13003                 ret = false;
13004         }
13005
13006 #define PIPE_CONF_CHECK_X(name) do { \
13007         if (current_config->name != pipe_config->name) { \
13008                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13009                                      "(expected 0x%08x, found 0x%08x)", \
13010                                      current_config->name, \
13011                                      pipe_config->name); \
13012                 ret = false; \
13013         } \
13014 } while (0)
13015
13016 #define PIPE_CONF_CHECK_I(name) do { \
13017         if (current_config->name != pipe_config->name) { \
13018                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13019                                      "(expected %i, found %i)", \
13020                                      current_config->name, \
13021                                      pipe_config->name); \
13022                 ret = false; \
13023         } \
13024 } while (0)
13025
13026 #define PIPE_CONF_CHECK_BOOL(name) do { \
13027         if (current_config->name != pipe_config->name) { \
13028                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
13029                                      "(expected %s, found %s)", \
13030                                      yesno(current_config->name), \
13031                                      yesno(pipe_config->name)); \
13032                 ret = false; \
13033         } \
13034 } while (0)
13035
13036 /*
13037  * Checks state where we only read out the enabling, but not the entire
13038  * state itself (like full infoframes or ELD for audio). These states
13039  * require a full modeset on bootup to fix up.
13040  */
13041 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13042         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13043                 PIPE_CONF_CHECK_BOOL(name); \
13044         } else { \
13045                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13046                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13047                                      yesno(current_config->name), \
13048                                      yesno(pipe_config->name)); \
13049                 ret = false; \
13050         } \
13051 } while (0)
13052
13053 #define PIPE_CONF_CHECK_P(name) do { \
13054         if (current_config->name != pipe_config->name) { \
13055                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13056                                      "(expected %p, found %p)", \
13057                                      current_config->name, \
13058                                      pipe_config->name); \
13059                 ret = false; \
13060         } \
13061 } while (0)
13062
13063 #define PIPE_CONF_CHECK_M_N(name) do { \
13064         if (!intel_compare_link_m_n(&current_config->name, \
13065                                     &pipe_config->name,\
13066                                     !fastset)) { \
13067                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13068                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13069                                      "found tu %i, gmch %i/%i link %i/%i)", \
13070                                      current_config->name.tu, \
13071                                      current_config->name.gmch_m, \
13072                                      current_config->name.gmch_n, \
13073                                      current_config->name.link_m, \
13074                                      current_config->name.link_n, \
13075                                      pipe_config->name.tu, \
13076                                      pipe_config->name.gmch_m, \
13077                                      pipe_config->name.gmch_n, \
13078                                      pipe_config->name.link_m, \
13079                                      pipe_config->name.link_n); \
13080                 ret = false; \
13081         } \
13082 } while (0)
13083
13084 /* This is required for BDW+ where there is only one set of registers for
13085  * switching between high and low RR.
13086  * This macro can be used whenever a comparison has to be made between one
13087  * hw state and multiple sw state variables.
13088  */
13089 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13090         if (!intel_compare_link_m_n(&current_config->name, \
13091                                     &pipe_config->name, !fastset) && \
13092             !intel_compare_link_m_n(&current_config->alt_name, \
13093                                     &pipe_config->name, !fastset)) { \
13094                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13095                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13096                                      "or tu %i gmch %i/%i link %i/%i, " \
13097                                      "found tu %i, gmch %i/%i link %i/%i)", \
13098                                      current_config->name.tu, \
13099                                      current_config->name.gmch_m, \
13100                                      current_config->name.gmch_n, \
13101                                      current_config->name.link_m, \
13102                                      current_config->name.link_n, \
13103                                      current_config->alt_name.tu, \
13104                                      current_config->alt_name.gmch_m, \
13105                                      current_config->alt_name.gmch_n, \
13106                                      current_config->alt_name.link_m, \
13107                                      current_config->alt_name.link_n, \
13108                                      pipe_config->name.tu, \
13109                                      pipe_config->name.gmch_m, \
13110                                      pipe_config->name.gmch_n, \
13111                                      pipe_config->name.link_m, \
13112                                      pipe_config->name.link_n); \
13113                 ret = false; \
13114         } \
13115 } while (0)
13116
13117 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13118         if ((current_config->name ^ pipe_config->name) & (mask)) { \
13119                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13120                                      "(%x) (expected %i, found %i)", \
13121                                      (mask), \
13122                                      current_config->name & (mask), \
13123                                      pipe_config->name & (mask)); \
13124                 ret = false; \
13125         } \
13126 } while (0)
13127
13128 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13129         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13130                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13131                                      "(expected %i, found %i)", \
13132                                      current_config->name, \
13133                                      pipe_config->name); \
13134                 ret = false; \
13135         } \
13136 } while (0)
13137
13138 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13139         if (!intel_compare_infoframe(&current_config->infoframes.name, \
13140                                      &pipe_config->infoframes.name)) { \
13141                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13142                                                &current_config->infoframes.name, \
13143                                                &pipe_config->infoframes.name); \
13144                 ret = false; \
13145         } \
13146 } while (0)
13147
13148 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13149         if (current_config->name1 != pipe_config->name1) { \
13150                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13151                                 "(expected %i, found %i, won't compare lut values)", \
13152                                 current_config->name1, \
13153                                 pipe_config->name1); \
13154                 ret = false;\
13155         } else { \
13156                 if (!intel_color_lut_equal(current_config->name2, \
13157                                         pipe_config->name2, pipe_config->name1, \
13158                                         bit_precision)) { \
13159                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13160                                         "hw_state doesn't match sw_state"); \
13161                         ret = false; \
13162                 } \
13163         } \
13164 } while (0)
13165
13166 #define PIPE_CONF_QUIRK(quirk) \
13167         ((current_config->quirks | pipe_config->quirks) & (quirk))
13168
13169         PIPE_CONF_CHECK_I(cpu_transcoder);
13170
13171         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13172         PIPE_CONF_CHECK_I(fdi_lanes);
13173         PIPE_CONF_CHECK_M_N(fdi_m_n);
13174
13175         PIPE_CONF_CHECK_I(lane_count);
13176         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13177
13178         if (INTEL_GEN(dev_priv) < 8) {
13179                 PIPE_CONF_CHECK_M_N(dp_m_n);
13180
13181                 if (current_config->has_drrs)
13182                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
13183         } else
13184                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13185
13186         PIPE_CONF_CHECK_X(output_types);
13187
13188         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13189         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13190         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13191         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13192         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13193         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13194
13195         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13196         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13197         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13198         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13199         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13200         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13201
13202         PIPE_CONF_CHECK_I(pixel_multiplier);
13203         PIPE_CONF_CHECK_I(output_format);
13204         PIPE_CONF_CHECK_I(dc3co_exitline);
13205         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13206         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13207             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13208                 PIPE_CONF_CHECK_BOOL(limited_color_range);
13209
13210         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13211         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13212         PIPE_CONF_CHECK_BOOL(has_infoframe);
13213         PIPE_CONF_CHECK_BOOL(fec_enable);
13214
13215         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13216
13217         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13218                               DRM_MODE_FLAG_INTERLACE);
13219
13220         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13221                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13222                                       DRM_MODE_FLAG_PHSYNC);
13223                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13224                                       DRM_MODE_FLAG_NHSYNC);
13225                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13226                                       DRM_MODE_FLAG_PVSYNC);
13227                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13228                                       DRM_MODE_FLAG_NVSYNC);
13229         }
13230
13231         PIPE_CONF_CHECK_X(gmch_pfit.control);
13232         /* pfit ratios are autocomputed by the hw on gen4+ */
13233         if (INTEL_GEN(dev_priv) < 4)
13234                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13235         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13236
13237         /*
13238          * Changing the EDP transcoder input mux
13239          * (A_ONOFF vs. A_ON) requires a full modeset.
13240          */
13241         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13242
13243         if (!fastset) {
13244                 PIPE_CONF_CHECK_I(pipe_src_w);
13245                 PIPE_CONF_CHECK_I(pipe_src_h);
13246
13247                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13248                 if (current_config->pch_pfit.enabled) {
13249                         PIPE_CONF_CHECK_X(pch_pfit.pos);
13250                         PIPE_CONF_CHECK_X(pch_pfit.size);
13251                 }
13252
13253                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13254                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13255
13256                 PIPE_CONF_CHECK_X(gamma_mode);
13257                 if (IS_CHERRYVIEW(dev_priv))
13258                         PIPE_CONF_CHECK_X(cgm_mode);
13259                 else
13260                         PIPE_CONF_CHECK_X(csc_mode);
13261                 PIPE_CONF_CHECK_BOOL(gamma_enable);
13262                 PIPE_CONF_CHECK_BOOL(csc_enable);
13263
13264                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13265                 if (bp_gamma)
13266                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13267
13268         }
13269
13270         PIPE_CONF_CHECK_BOOL(double_wide);
13271
13272         PIPE_CONF_CHECK_P(shared_dpll);
13273         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13274         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13275         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13276         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13277         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13278         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13279         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13280         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13281         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13282         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13283         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13284         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13285         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13286         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13287         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13288         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13289         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13290         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13291         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13292         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13293         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13294         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13295         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13296         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13297         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13298         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13299         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13300         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13301         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13302         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13303         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13304
13305         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13306         PIPE_CONF_CHECK_X(dsi_pll.div);
13307
13308         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13309                 PIPE_CONF_CHECK_I(pipe_bpp);
13310
13311         PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
13312         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13313
13314         PIPE_CONF_CHECK_I(min_voltage_level);
13315
13316         PIPE_CONF_CHECK_X(infoframes.enable);
13317         PIPE_CONF_CHECK_X(infoframes.gcp);
13318         PIPE_CONF_CHECK_INFOFRAME(avi);
13319         PIPE_CONF_CHECK_INFOFRAME(spd);
13320         PIPE_CONF_CHECK_INFOFRAME(hdmi);
13321         PIPE_CONF_CHECK_INFOFRAME(drm);
13322
13323         PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
13324         PIPE_CONF_CHECK_I(master_transcoder);
13325
13326 #undef PIPE_CONF_CHECK_X
13327 #undef PIPE_CONF_CHECK_I
13328 #undef PIPE_CONF_CHECK_BOOL
13329 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13330 #undef PIPE_CONF_CHECK_P
13331 #undef PIPE_CONF_CHECK_FLAGS
13332 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13333 #undef PIPE_CONF_CHECK_COLOR_LUT
13334 #undef PIPE_CONF_QUIRK
13335
13336         return ret;
13337 }
13338
13339 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13340                                            const struct intel_crtc_state *pipe_config)
13341 {
13342         if (pipe_config->has_pch_encoder) {
13343                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13344                                                             &pipe_config->fdi_m_n);
13345                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13346
13347                 /*
13348                  * FDI already provided one idea for the dotclock.
13349                  * Yell if the encoder disagrees.
13350                  */
13351                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13352                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13353                      fdi_dotclock, dotclock);
13354         }
13355 }
13356
/*
 * Verify that the skl+ watermark and DDB state programmed in hardware
 * matches the sw state computed for @crtc. Only runs on gen9+ with an
 * active crtc; mismatches are reported via DRM_ERROR.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Heap-allocated scratch for the hw readout (too big for the stack). */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_ddb_allocation ddb;
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_ddb_allocation *sw_ddb;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	const enum pipe pipe = crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	/* Read back the hw watermark/DDB state to compare against. */
	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
	sw_wm = &new_crtc_state->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* DBUF slice configuration only exists on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
			  sw_ddb->enabled_slices,
			  hw->ddb.enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark. */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	/*
	 * NOTE(review): the comment above describes a conditional skip, but
	 * the condition is hard-coded to 1, so the cursor is always checked.
	 * The block only serves to scope the local variables.
	 */
	if (1) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
13495
13496 static void
13497 verify_connector_state(struct intel_atomic_state *state,
13498                        struct intel_crtc *crtc)
13499 {
13500         struct drm_connector *connector;
13501         struct drm_connector_state *new_conn_state;
13502         int i;
13503
13504         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13505                 struct drm_encoder *encoder = connector->encoder;
13506                 struct intel_crtc_state *crtc_state = NULL;
13507
13508                 if (new_conn_state->crtc != &crtc->base)
13509                         continue;
13510
13511                 if (crtc)
13512                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13513
13514                 intel_connector_verify_state(crtc_state, new_conn_state);
13515
13516                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13517                      "connector's atomic encoder doesn't match legacy encoder\n");
13518         }
13519 }
13520
/*
 * Verify encoder state consistency across the atomic commit: for every
 * encoder touched by @state, check that its enabled state matches the
 * connectors routed to it, and that a detached encoder is really off in
 * hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: some connector in the state referenced this encoder
		 * (old or new) — i.e. the commit touched it at all.
		 * enabled: a connector uses it in the new state.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			/* Connector and encoder must agree on the crtc. */
			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this commit are not checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* A detached encoder must also be disabled in hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
13569
/*
 * Verify that the committed sw state for @crtc matches what the
 * hardware actually ended up with. The hw state is read back into
 * @old_crtc_state's storage (which is no longer needed after the
 * commit) and compared against @new_crtc_state.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	bool active;

	/*
	 * Free the old state's contents and reuse its memory as scratch
	 * for the hw readout, preserving only the backpointers that
	 * must survive the memset.
	 */
	state = old_crtc_state->uapi.state;
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);

	pipe_config = old_crtc_state;
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->uapi.crtc = &crtc->base;
	pipe_config->uapi.state = state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Each encoder on the crtc must agree on active state and pipe. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Augment the readout with the encoder's view of the state. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Nothing more to compare when the crtc is off. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Full (non-fastset) comparison of sw state vs. hw readout. */
	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
13640
13641 static void
13642 intel_verify_planes(struct intel_atomic_state *state)
13643 {
13644         struct intel_plane *plane;
13645         const struct intel_plane_state *plane_state;
13646         int i;
13647
13648         for_each_new_intel_plane_in_state(state, plane,
13649                                           plane_state, i)
13650                 assert_plane(plane, plane_state->planar_slave ||
13651                              plane_state->uapi.visible);
13652 }
13653
/*
 * Cross check the SW shared-DPLL tracking against the HW state of @pll.
 *
 * With @crtc non-NULL, additionally verify that the crtc's membership in
 * pll->active_mask and pll->state.crtc_mask matches the crtc's new state.
 * With @crtc == NULL only the PLL-global bookkeeping is checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	/* Read the PLL's current enable state and dividers from HW. */
	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/off bookkeeping checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* Active users must be a subset of the referenced crtcs. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* An active crtc must be in the PLL's active mask, and vice versa. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* The SW-computed PLL settings must match what is programmed in HW. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13708
13709 static void
13710 verify_shared_dpll_state(struct intel_crtc *crtc,
13711                          struct intel_crtc_state *old_crtc_state,
13712                          struct intel_crtc_state *new_crtc_state)
13713 {
13714         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13715
13716         if (new_crtc_state->shared_dpll)
13717                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13718
13719         if (old_crtc_state->shared_dpll &&
13720             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13721                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13722                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13723
13724                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13725                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13726                                 pipe_name(drm_crtc_index(&crtc->base)));
13727                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13728                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13729                                 pipe_name(drm_crtc_index(&crtc->base)));
13730         }
13731 }
13732
13733 static void
13734 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13735                           struct intel_atomic_state *state,
13736                           struct intel_crtc_state *old_crtc_state,
13737                           struct intel_crtc_state *new_crtc_state)
13738 {
13739         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13740                 return;
13741
13742         verify_wm_state(crtc, new_crtc_state);
13743         verify_connector_state(state, crtc);
13744         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13745         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13746 }
13747
13748 static void
13749 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13750 {
13751         int i;
13752
13753         for (i = 0; i < dev_priv->num_shared_dpll; i++)
13754                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13755 }
13756
/*
 * Verify the parts of the HW state that are not tied to a specific
 * crtc: encoder state, connectors without a crtc, and shared DPLLs
 * that are not in use.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
13765
/*
 * Set up the vblank timestamping constants and the platform-specific
 * scanline counter offset for @crtc_state's adjusted mode. Must be
 * called whenever the active timings change (i.e. on every modeset).
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		/* Interlaced modes halve the effective vtotal. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
13818
13819 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13820 {
13821         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13822         struct intel_crtc_state *new_crtc_state;
13823         struct intel_crtc *crtc;
13824         int i;
13825
13826         if (!dev_priv->display.crtc_compute_clock)
13827                 return;
13828
13829         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13830                 if (!needs_modeset(new_crtc_state))
13831                         continue;
13832
13833                 intel_release_shared_dplls(state, crtc);
13834         }
13835 }
13836
13837 /*
13838  * This implements the workaround described in the "notes" section of the mode
13839  * set sequence documentation. When going from no pipes or single pipe to
13840  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13841  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13842  */
static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being enabled by this modeset. */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* Pulls every crtc into the state; may return -EDEADLK. */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		/* Only count crtcs that stay enabled through this commit. */
		if (!crtc_state->hw.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Single pipe already enabled: the newly enabled pipe must wait on
	 * it. No pipe enabled but two being enabled: the second waits on
	 * the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13897
/*
 * Global check work needed when the commit contains at least one full
 * modeset: recompute the active pipe mask, serialize against other
 * commits when it changes, recompute cdclk, release shared DPLLs of
 * modesetted crtcs, and apply the HSW plane enable workaround.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	state->modeset = true;
	state->active_pipes = dev_priv->active_pipes;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;

	/* Fold the per-crtc active changes into the device-wide pipe mask. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->hw.active)
			state->active_pipes |= BIT(crtc->pipe);
		else
			state->active_pipes &= ~BIT(crtc->pipe);

		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
			state->active_pipe_changes |= BIT(crtc->pipe);
	}

	/*
	 * Turning pipes on/off affects device-wide state, so take the
	 * global state lock to serialize against concurrent commits.
	 */
	if (state->active_pipe_changes) {
		ret = intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13942
13943 /*
13944  * Handle calculation of various watermark data at the end of the atomic check
13945  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13946  * handlers to ensure that all derived state has been updated.
13947  */
13948 static int calc_watermark_data(struct intel_atomic_state *state)
13949 {
13950         struct drm_device *dev = state->base.dev;
13951         struct drm_i915_private *dev_priv = to_i915(dev);
13952
13953         /* Is there platform-specific watermark information to calculate? */
13954         if (dev_priv->display.compute_global_watermarks)
13955                 return dev_priv->display.compute_global_watermarks(state);
13956
13957         return 0;
13958 }
13959
/*
 * If the new crtc state is equivalent to the old one (fuzzy compare),
 * downgrade the pending full modeset to a fastset: clear
 * uapi.mode_changed and set update_pipe instead.
 */
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
				     struct intel_crtc_state *new_crtc_state)
{
	/* Fuzzy (adjust=true) compare; bail if a real modeset is needed. */
	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
		return;

	new_crtc_state->uapi.mode_changed = false;
	new_crtc_state->update_pipe = true;

	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
13982
13983 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
13984                                           struct intel_crtc *crtc,
13985                                           u8 plane_ids_mask)
13986 {
13987         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13988         struct intel_plane *plane;
13989
13990         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
13991                 struct intel_plane_state *plane_state;
13992
13993                 if ((plane_ids_mask & BIT(plane->id)) == 0)
13994                         continue;
13995
13996                 plane_state = intel_atomic_get_plane_state(state, plane);
13997                 if (IS_ERR(plane_state))
13998                         return PTR_ERR(plane_state);
13999         }
14000
14001         return 0;
14002 }
14003
14004 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14005 {
14006         /* See {hsw,vlv,ivb}_plane_ratio() */
14007         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14008                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14009                 IS_IVYBRIDGE(dev_priv);
14010 }
14011
/*
 * Run the plane-level atomic checks and compute each plane's minimum
 * cdclk requirement. Sets *need_modeset if a plane's min cdclk demand
 * forces a cdclk (and hence modeset-like) update.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_modeset)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane doesn't count for the plane ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Plane count unchanged -> ratios (and min cdclk) unchanged. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);

	return 0;
}
14072
14073 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14074 {
14075         struct intel_crtc_state *crtc_state;
14076         struct intel_crtc *crtc;
14077         int i;
14078
14079         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14080                 int ret = intel_crtc_atomic_check(state, crtc);
14081                 if (ret) {
14082                         DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
14083                                          crtc->base.base.id, crtc->base.name);
14084                         return ret;
14085                 }
14086         }
14087
14088         return 0;
14089 }
14090
14091 /**
14092  * intel_atomic_check - validate state object
14093  * @dev: drm device
14094  * @_state: state to validate
14095  */
14096 static int intel_atomic_check(struct drm_device *dev,
14097                               struct drm_atomic_state *_state)
14098 {
14099         struct drm_i915_private *dev_priv = to_i915(dev);
14100         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14101         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14102         struct intel_crtc *crtc;
14103         int ret, i;
14104         bool any_ms = false;
14105
14106         /* Catch I915_MODE_FLAG_INHERITED */
14107         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14108                                             new_crtc_state, i) {
14109                 if (new_crtc_state->hw.mode.private_flags !=
14110                     old_crtc_state->hw.mode.private_flags)
14111                         new_crtc_state->uapi.mode_changed = true;
14112         }
14113
14114         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14115         if (ret)
14116                 goto fail;
14117
14118         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14119                                             new_crtc_state, i) {
14120                 if (!needs_modeset(new_crtc_state)) {
14121                         /* Light copy */
14122                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14123
14124                         continue;
14125                 }
14126
14127                 if (!new_crtc_state->uapi.enable) {
14128                         intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
14129
14130                         any_ms = true;
14131                         continue;
14132                 }
14133
14134                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14135                 if (ret)
14136                         goto fail;
14137
14138                 ret = intel_modeset_pipe_config(new_crtc_state);
14139                 if (ret)
14140                         goto fail;
14141
14142                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14143
14144                 if (needs_modeset(new_crtc_state))
14145                         any_ms = true;
14146         }
14147
14148         if (any_ms && !check_digital_port_conflicts(state)) {
14149                 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
14150                 ret = EINVAL;
14151                 goto fail;
14152         }
14153
14154         ret = drm_dp_mst_atomic_check(&state->base);
14155         if (ret)
14156                 goto fail;
14157
14158         any_ms |= state->cdclk.force_min_cdclk_changed;
14159
14160         ret = intel_atomic_check_planes(state, &any_ms);
14161         if (ret)
14162                 goto fail;
14163
14164         if (any_ms) {
14165                 ret = intel_modeset_checks(state);
14166                 if (ret)
14167                         goto fail;
14168         } else {
14169                 state->cdclk.logical = dev_priv->cdclk.logical;
14170         }
14171
14172         ret = intel_atomic_check_crtcs(state);
14173         if (ret)
14174                 goto fail;
14175
14176         intel_fbc_choose_crtc(dev_priv, state);
14177         ret = calc_watermark_data(state);
14178         if (ret)
14179                 goto fail;
14180
14181         ret = intel_bw_atomic_check(state);
14182         if (ret)
14183                 goto fail;
14184
14185         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14186                                             new_crtc_state, i) {
14187                 if (!needs_modeset(new_crtc_state) &&
14188                     !new_crtc_state->update_pipe)
14189                         continue;
14190
14191                 intel_dump_pipe_config(new_crtc_state, state,
14192                                        needs_modeset(new_crtc_state) ?
14193                                        "[modeset]" : "[fastset]");
14194         }
14195
14196         return 0;
14197
14198  fail:
14199         if (ret == -EDEADLK)
14200                 return ret;
14201
14202         /*
14203          * FIXME would probably be nice to know which crtc specifically
14204          * caused the failure, in cases where we can pinpoint it.
14205          */
14206         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14207                                             new_crtc_state, i)
14208                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14209
14210         return ret;
14211 }
14212
/*
 * Prepare all planes in @state for commit (pin framebuffers etc.).
 * Currently a thin wrapper around the DRM atomic helper.
 */
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(state->base.dev,
						&state->base);
}
14218
14219 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14220 {
14221         struct drm_device *dev = crtc->base.dev;
14222         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14223
14224         if (!vblank->max_vblank_count)
14225                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14226
14227         return crtc->base.funcs->get_vblank_counter(&crtc->base);
14228 }
14229
/*
 * Enable FIFO underrun reporting for @crtc, and for its PCH transcoder
 * when a PCH encoder is in use.
 */
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* gen2 is skipped here — presumably no per-pipe CPU underrun
	 * reporting on that platform; TODO confirm against the
	 * fifo_underrun implementation. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}
14245
/*
 * Apply the pipe-level HW updates needed during a fastset: pipe source
 * size, panel fitter state, and (gen11+) pipe chicken bits.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Pre-gen9 PCH platforms: enable or disable the fitter
		 * depending on the old vs. new pfit state. */
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
14278
/*
 * Commit the pipe-level configuration (color management, scalers,
 * PIPEMISC, fastset updates) and the watermarks for @new_crtc_state.
 * Called from within the vblank evasion critical section.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc_state *old_crtc_state,
			       struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state,
							   new_crtc_state);
}
14309
/*
 * Commit the new state of a single crtc: enable it (modeset) or apply
 * the pre-plane updates (fastset/flip), then update planes and the pipe
 * config under vblank evasion. The statement order here is critical —
 * do not reorder without consulting the modeset sequence docs.
 */
static void intel_update_crtc(struct intel_crtc *crtc,
			      struct intel_atomic_state *state,
			      struct intel_crtc_state *old_crtc_state,
			      struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	bool modeset = needs_modeset(new_crtc_state);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));

	if (modeset) {
		intel_crtc_update_active_timings(new_crtc_state);

		dev_priv->display.crtc_enable(new_crtc_state, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(crtc);
	} else {
		/* Preload LUTs before the vblank-evaded commit below. */
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(old_crtc_state, new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(crtc, new_crtc_state, state);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, old_crtc_state, new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14367
14368 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
14369 {
14370         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
14371         enum transcoder slave_transcoder;
14372
14373         WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
14374
14375         slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
14376         return intel_get_crtc_for_pipe(dev_priv,
14377                                        (enum pipe)slave_transcoder);
14378 }
14379
/*
 * Fully disable a CRTC: planes first, then pipe CRC, then the pipe
 * itself, FBC and its shared DPLL.  Statement order matters throughout.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                                          struct intel_crtc_state *old_crtc_state,
                                          struct intel_crtc_state *new_crtc_state,
                                          struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_disable_planes(state, crtc);

        /*
         * We need to disable pipe CRC before disabling the pipe,
         * or we race against vblank off.
         */
        intel_crtc_disable_pipe_crc(crtc);

        dev_priv->display.crtc_disable(old_crtc_state, state);
        crtc->active = false;
        intel_fbc_disable(crtc);
        intel_disable_shared_dpll(old_crtc_state);

        /*
         * Underruns don't always raise interrupts,
         * so check manually.
         */
        intel_check_cpu_fifo_underruns(dev_priv);
        intel_check_pch_fifo_underruns(dev_priv);

        /* FIXME unify this for all platforms */
        if (!new_crtc_state->hw.active &&
            !HAS_GMCH(dev_priv) &&
            dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state,
                                                     new_crtc_state);
}
14414
/*
 * Disable a transcoder port sync pair.  The slave must go down before
 * the master because slave vblanks are masked until the master's.
 */
static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
                                                   struct intel_crtc *crtc,
                                                   struct intel_crtc_state *old_crtc_state,
                                                   struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
        struct intel_crtc_state *new_slave_crtc_state =
                intel_atomic_get_new_crtc_state(state, slave_crtc);
        struct intel_crtc_state *old_slave_crtc_state =
                intel_atomic_get_old_crtc_state(state, slave_crtc);

        WARN_ON(!slave_crtc || !new_slave_crtc_state ||
                !old_slave_crtc_state);

        /* Disable Slave first */
        intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
        if (old_slave_crtc_state->hw.active)
                intel_old_crtc_state_disables(state,
                                              old_slave_crtc_state,
                                              new_slave_crtc_state,
                                              slave_crtc);

        /* Disable Master */
        intel_pre_plane_update(old_crtc_state, new_crtc_state);
        if (old_crtc_state->hw.active)
                intel_old_crtc_state_disables(state,
                                              old_crtc_state,
                                              new_crtc_state,
                                              crtc);
}
14445
/* Disable every CRTC that needs a modeset, in reverse pipe order. */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        int i;

        /*
         * Disable CRTC/pipes in reverse order because some features (MST in
         * TGL+) require a master/slave relationship between pipes, so the
         * lowest pipe should always be picked as master as it will be
         * enabled first; disabling in reverse order then makes the master
         * the last one to be disabled.
         */
        for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
                                                    new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state))
                        continue;

                /* In case of Transcoder port Sync master slave CRTCs can be
                 * assigned in any order and we need to make sure that
                 * slave CRTCs are disabled first and then master CRTC since
                 * Slave vblanks are masked till Master Vblanks.
                 */
                if (is_trans_port_sync_mode(new_crtc_state)) {
                        /* Slaves are disabled together with their master. */
                        if (is_trans_port_sync_master(new_crtc_state))
                                intel_trans_port_sync_modeset_disables(state,
                                                                       crtc,
                                                                       old_crtc_state,
                                                                       new_crtc_state);
                        else
                                continue;
                } else {
                        intel_pre_plane_update(old_crtc_state, new_crtc_state);

                        if (old_crtc_state->hw.active)
                                intel_old_crtc_state_disables(state,
                                                              old_crtc_state,
                                                              new_crtc_state,
                                                              crtc);
                }
        }
}
14488
14489 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14490 {
14491         struct intel_crtc *crtc;
14492         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14493         int i;
14494
14495         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14496                 if (!new_crtc_state->hw.active)
14497                         continue;
14498
14499                 intel_update_crtc(crtc, state, old_crtc_state,
14500                                   new_crtc_state);
14501         }
14502 }
14503
/*
 * Modeset-enable one CRTC of a transcoder port sync pair: timings,
 * pipe enable, then pipe CRC (same steps as intel_update_crtc()'s
 * modeset branch).
 */
static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
                                              struct intel_atomic_state *state,
                                              struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_update_active_timings(new_crtc_state);
        dev_priv->display.crtc_enable(new_crtc_state, state);
        intel_crtc_enable_pipe_crc(crtc);
}
14514
14515 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
14516                                        struct intel_atomic_state *state)
14517 {
14518         struct drm_connector *uninitialized_var(conn);
14519         struct drm_connector_state *conn_state;
14520         struct intel_dp *intel_dp;
14521         int i;
14522
14523         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
14524                 if (conn_state->crtc == &crtc->base)
14525                         break;
14526         }
14527         intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
14528         intel_dp_stop_link_train(intel_dp);
14529 }
14530
/*
 * Post-enable half of a transcoder port sync update: FBC handling,
 * pipe config and plane commit under vblank evasion, and first-fastset
 * FIFO underrun arming (mirrors the tail of intel_update_crtc()).
 */
static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
                                           struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct intel_plane_state *new_plane_state =
                intel_atomic_get_new_plane_state(state,
                                                 to_intel_plane(crtc->base.primary));
        bool modeset = needs_modeset(new_crtc_state);

        /* Disable FBC if the new state no longer allows it, else (re)enable. */
        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else if (new_plane_state)
                intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);
        commit_pipe_config(state, old_crtc_state, new_crtc_state);
        skl_update_planes_on_crtc(state, crtc);
        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14564
/*
 * Enable a transcoder port sync master/slave pair in the required
 * order: enable both pipes with DP_TP_CTL left Idle, flip the slave's
 * then the master's DP_TP_CTL to Normal, then run the post-enable
 * updates for both.  The sequence must not be reordered.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
                                               struct intel_atomic_state *state,
                                               struct intel_crtc_state *old_crtc_state,
                                               struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
        struct intel_crtc_state *new_slave_crtc_state =
                intel_atomic_get_new_crtc_state(state, slave_crtc);
        struct intel_crtc_state *old_slave_crtc_state =
                intel_atomic_get_old_crtc_state(state, slave_crtc);

        WARN_ON(!slave_crtc || !new_slave_crtc_state ||
                !old_slave_crtc_state);

        DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
                      crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
                      slave_crtc->base.name);

        /* Enable seq for slave with DP_TP_CTL left Idle until the
         * master is ready
         */
        intel_crtc_enable_trans_port_sync(slave_crtc,
                                          state,
                                          new_slave_crtc_state);

        /* Enable seq for master with DP_TP_CTL left Idle */
        intel_crtc_enable_trans_port_sync(crtc,
                                          state,
                                          new_crtc_state);

        /* Set Slave's DP_TP_CTL to Normal */
        intel_set_dp_tp_ctl_normal(slave_crtc,
                                   state);

        /* Set Master's DP_TP_CTL To Normal */
        usleep_range(200, 400);
        intel_set_dp_tp_ctl_normal(crtc,
                                   state);

        /* Now do the post crtc enable for all master and slaves */
        intel_post_crtc_enable_updates(slave_crtc,
                                       state);
        intel_post_crtc_enable_updates(crtc,
                                       state);
}
14610
/*
 * Gen9+ enable path: update CRTCs in an order that keeps DDB (data
 * buffer) allocations from ever overlapping between updates, iterating
 * until every active pipe has been committed.  Also enables/disables
 * the second DBuf slice around the updates as needed (gen11+).
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        unsigned int updated = 0;       /* bitmask of pipes already committed */
        bool progress;
        int i;
        u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        u8 required_slices = state->wm_results.ddb.enabled_slices;
        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
                /* ignore allocations for crtc's that have been turned off. */
                if (new_crtc_state->hw.active)
                        entries[i] = old_crtc_state->wm.skl.ddb;

        /* If 2nd DBuf slice required, enable it here */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other in between CRTC updates. Otherwise
         * we'll cause pipe underruns and other bad stuff.
         */
        do {
                progress = false;

                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        enum pipe pipe = crtc->pipe;
                        bool vbl_wait = false;
                        bool modeset = needs_modeset(new_crtc_state);

                        if (updated & BIT(crtc->pipe) || !new_crtc_state->hw.active)
                                continue;

                        /* Defer this pipe while its new DDB overlaps another's old one. */
                        if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                        entries,
                                                        INTEL_NUM_PIPES(dev_priv), i))
                                continue;

                        updated |= BIT(pipe);
                        entries[i] = new_crtc_state->wm.skl.ddb;

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
                                                 &old_crtc_state->wm.skl.ddb) &&
                            !modeset &&
                            state->wm_results.dirty_pipes != updated)
                                vbl_wait = true;

                        if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
                                /* Port sync slaves are enabled with their master. */
                                if (is_trans_port_sync_master(new_crtc_state))
                                        intel_update_trans_port_sync_crtcs(crtc,
                                                                           state,
                                                                           old_crtc_state,
                                                                           new_crtc_state);
                                else
                                        continue;
                        } else {
                                intel_update_crtc(crtc, state, old_crtc_state,
                                                  new_crtc_state);
                        }

                        if (vbl_wait)
                                intel_wait_for_vblank(dev_priv, pipe);

                        progress = true;
                }
        } while (progress);

        /* If 2nd DBuf slice is no more required disable it */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);
}
14693
/* Drop the reference on every atomic state queued on the lock-less free list. */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
        struct intel_atomic_state *state, *next;
        struct llist_node *freed;

        /* llist_del_all atomically detaches the whole list. */
        freed = llist_del_all(&dev_priv->atomic_helper.free_list);
        llist_for_each_entry_safe(state, next, freed, freed)
                drm_atomic_state_put(&state->base);
}
14703
/* Work item wrapper around intel_atomic_helper_free_state(). */
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), atomic_helper.free_work);

        intel_atomic_helper_free_state(dev_priv);
}
14711
/*
 * Block until the commit's sw fence signals OR a GPU reset requiring a
 * modeset is flagged, by waiting on both waitqueues simultaneously so
 * neither event can be missed.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                /* Re-arm both waiters before re-checking the conditions. */
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                              I915_RESET_MODESET),
                                &wait_reset, TASK_UNINTERRUPTIBLE);


                if (i915_sw_fence_done(&intel_state->commit_ready) ||
                    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                  I915_RESET_MODESET),
                    &wait_reset);
}
14738
/*
 * Deferred cleanup of a committed atomic state (queued from
 * intel_atomic_commit_tail() to keep latency off the commit path).
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
        struct drm_atomic_state *state =
                container_of(work, struct drm_atomic_state, commit_work);
        struct drm_i915_private *i915 = to_i915(state->dev);

        drm_atomic_helper_cleanup_planes(&i915->drm, state);
        drm_atomic_helper_commit_cleanup_done(state);
        drm_atomic_state_put(state);

        /* Also drain any states queued for freeing by the fence callback. */
        intel_atomic_helper_free_state(i915);
}
14751
/*
 * The hardware phase of an atomic commit: waits for dependencies, then
 * disables, reprograms and enables pipes in the required order, waits
 * for flips, runs post-plane/watermark fixups and hands cleanup off to
 * a worker.  Runs either inline (blocking commits) or from a workqueue.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
        struct drm_device *dev = state->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
        intel_wakeref_t wakeref = 0;
        int i;

        intel_atomic_commit_fence_wait(state);

        drm_atomic_helper_wait_for_dependencies(&state->base);

        if (state->modeset)
                wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

        /* Grab power domains for every CRTC being modeset or fastset. */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (needs_modeset(new_crtc_state) ||
                    new_crtc_state->update_pipe) {

                        put_domains[crtc->pipe] =
                                modeset_get_crtc_power_domains(new_crtc_state);
                }
        }

        intel_commit_modeset_disables(state);

        /* FIXME: Eventually get rid of our crtc->config pointer */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                crtc->config = new_crtc_state;

        if (state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

                intel_set_cdclk_pre_plane_update(dev_priv,
                                                 &state->cdclk.actual,
                                                 &dev_priv->cdclk.actual,
                                                 state->cdclk.pipe);

                /*
                 * SKL workaround: bspec recommends we disable the SAGV when we
                 * have more than one pipe enabled
                 */
                if (!intel_can_enable_sagv(state))
                        intel_disable_sagv(dev_priv);

                intel_modeset_verify_disabled(dev_priv, state);
        }

        /* Complete the events for pipes that have now been disabled */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                bool modeset = needs_modeset(new_crtc_state);

                /* Complete events for now-disabled pipes here. */
                if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_crtc_send_vblank_event(&crtc->base,
                                                   new_crtc_state->uapi.event);
                        spin_unlock_irq(&dev->event_lock);

                        new_crtc_state->uapi.event = NULL;
                }
        }

        if (state->modeset)
                intel_encoders_update_prepare(state);

        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.commit_modeset_enables(state);

        if (state->modeset) {
                intel_encoders_update_complete(state);

                intel_set_cdclk_post_plane_update(dev_priv,
                                                  &state->cdclk.actual,
                                                  &dev_priv->cdclk.actual,
                                                  state->cdclk.pipe);
        }

        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
         * - wrap the optimization/post_plane_update stuff into a per-crtc work.
         * - schedule that vblank worker _before_ calling hw_done
         * - at the start of commit_tail, cancel it _synchronously
         * - switch over to the vblank wait helper in the core after that since
         *   we don't need our special handling any more.
         */
        drm_atomic_helper_wait_for_flip_done(dev, &state->base);

        /* Load LUTs post-flip for fastsets that couldn't preload them. */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (new_crtc_state->hw.active &&
                    !needs_modeset(new_crtc_state) &&
                    !new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);
        }

        /*
         * Now that the vblank has passed, we can go ahead and program the
         * optimal watermarks on platforms that need two-step watermark
         * programming.
         *
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(state,
                                                              new_crtc_state);
        }

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(old_crtc_state);

                if (put_domains[i])
                        modeset_put_power_domains(dev_priv, put_domains[i]);

                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
        }

        if (state->modeset)
                intel_verify_planes(state);

        if (state->modeset && intel_can_enable_sagv(state))
                intel_enable_sagv(dev_priv);

        drm_atomic_helper_commit_hw_done(&state->base);

        if (state->modeset) {
                /* As one of the primary mmio accessors, KMS has a high
                 * likelihood of triggering bugs in unclaimed access. After we
                 * finish modesetting, see if an error has been flagged, and if
                 * so enable debugging for the next modeset - and hope we catch
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
        }
        intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

        /*
         * Defer the cleanup of the old state to a separate worker to not
         * impede the current task (userspace for blocking modesets) that
         * are executed inline. For out-of-line asynchronous modesets/flips,
         * deferring to a new worker seems overkill, but we would place a
         * schedule point (cond_resched()) here anyway to keep latencies
         * down.
         */
        INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
        queue_work(system_highpri_wq, &state->base.commit_work);
}
14906
/* Work item wrapper for nonblocking commits: runs the commit tail. */
static void intel_atomic_commit_work(struct work_struct *work)
{
        struct intel_atomic_state *state =
                container_of(work, struct intel_atomic_state, base.commit_work);

        intel_atomic_commit_tail(state);
}
14914
/*
 * sw fence callback for the commit_ready fence: nothing to do on
 * completion (the commit tail blocks on the fence itself); on free,
 * queue the state on the lock-less free list for the helper worker.
 */
static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
                          enum i915_sw_fence_notify notify)
{
        struct intel_atomic_state *state =
                container_of(fence, struct intel_atomic_state, commit_ready);

        switch (notify) {
        case FENCE_COMPLETE:
                /* we do blocking waits in the worker, nothing to do here */
                break;
        case FENCE_FREE:
                {
                        struct intel_atomic_helper *helper =
                                &to_i915(state->base.dev)->atomic_helper;

                        /* Only the first queuer needs to kick the worker. */
                        if (llist_add(&state->freed, &helper->free_list))
                                schedule_work(&helper->free_work);
                        break;
                }
        }

        return NOTIFY_DONE;
}
14939
/*
 * Transfer frontbuffer tracking bits from each plane's old fb to its
 * new fb for every plane in the state.
 */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
        struct intel_plane_state *old_plane_state, *new_plane_state;
        struct intel_plane *plane;
        int i;

        for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
                                             new_plane_state, i)
                intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
                                        to_intel_frontbuffer(new_plane_state->hw.fb),
                                        plane->frontbuffer_bit);
}
14952
/* Assert that every CRTC's modeset lock is held (guards global state swaps). */
static void assert_global_state_locked(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc)
                drm_modeset_lock_assert_held(&crtc->base.mutex);
}
14960
/*
 * i915's drm_mode_config_funcs.atomic_commit implementation: prepares
 * and swaps the atomic state, publishes global (cdclk/voltage) state
 * under the global lock, then runs the commit tail either inline
 * (blocking) or via a workqueue (nonblocking).
 *
 * Returns 0 on success or a negative errno; on failure the runtime PM
 * wakeref is released and the prepared planes are cleaned up.
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
{
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        drm_atomic_state_get(&state->base);
        i915_sw_fence_init(&state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->base.legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(state);
        if (ret) {
                DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
                /* Commit the fence so FENCE_FREE can run and release refs. */
                i915_sw_fence_commit(&state->commit_ready);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }

        ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(&state->base, true);

        if (ret) {
                i915_sw_fence_commit(&state->commit_ready);

                drm_atomic_helper_cleanup_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }
        dev_priv->wm.distrust_bios_wm = false;
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        /* Publish new global state; requires all CRTC locks (asserted). */
        if (state->global_state_changed) {
                assert_global_state_locked(dev_priv);

                memcpy(dev_priv->min_cdclk, state->min_cdclk,
                       sizeof(state->min_cdclk));
                memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
                       sizeof(state->min_voltage_level));
                dev_priv->active_pipes = state->active_pipes;
                dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;

                intel_cdclk_swap_state(state);
        }

        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&state->commit_ready);
        if (nonblock && state->modeset) {
                queue_work(dev_priv->modeset_wq, &state->base.commit_work);
        } else if (nonblock) {
                queue_work(dev_priv->flip_wq, &state->base.commit_work);
        } else {
                /* Blocking modesets must not overtake queued nonblocking ones. */
                if (state->modeset)
                        flush_workqueue(dev_priv->modeset_wq);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
15055
/*
 * Bookkeeping for a deferred RPS boost: queued on a crtc's vblank
 * waitqueue by add_rps_boost_after_vblank() and consumed (then freed)
 * by do_rps_boost() when the vblank fires.
 */
struct wait_rps_boost {
        struct wait_queue_entry wait;   /* entry on the crtc's vblank waitqueue */

        struct drm_crtc *crtc;          /* crtc we hold a vblank reference on */
        struct i915_request *request;   /* request to boost if not yet started */
};
15062
15063 static int do_rps_boost(struct wait_queue_entry *_wait,
15064                         unsigned mode, int sync, void *key)
15065 {
15066         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
15067         struct i915_request *rq = wait->request;
15068
15069         /*
15070          * If we missed the vblank, but the request is already running it
15071          * is reasonable to assume that it will complete before the next
15072          * vblank without our intervention, so leave RPS alone.
15073          */
15074         if (!i915_request_started(rq))
15075                 intel_rps_boost(rq);
15076         i915_request_put(rq);
15077
15078         drm_crtc_vblank_put(wait->crtc);
15079
15080         list_del(&wait->wait.entry);
15081         kfree(wait);
15082         return 1;
15083 }
15084
15085 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15086                                        struct dma_fence *fence)
15087 {
15088         struct wait_rps_boost *wait;
15089
15090         if (!dma_fence_is_i915(fence))
15091                 return;
15092
15093         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15094                 return;
15095
15096         if (drm_crtc_vblank_get(crtc))
15097                 return;
15098
15099         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15100         if (!wait) {
15101                 drm_crtc_vblank_put(crtc);
15102                 return;
15103         }
15104
15105         wait->request = to_request(dma_fence_get(fence));
15106         wait->crtc = crtc;
15107
15108         wait->wait.func = do_rps_boost;
15109         wait->wait.flags = 0;
15110
15111         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15112 }
15113
15114 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15115 {
15116         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15117         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15118         struct drm_framebuffer *fb = plane_state->hw.fb;
15119         struct i915_vma *vma;
15120
15121         if (plane->id == PLANE_CURSOR &&
15122             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15123                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15124                 const int align = intel_cursor_alignment(dev_priv);
15125                 int err;
15126
15127                 err = i915_gem_object_attach_phys(obj, align);
15128                 if (err)
15129                         return err;
15130         }
15131
15132         vma = intel_pin_and_fence_fb_obj(fb,
15133                                          &plane_state->view,
15134                                          intel_plane_uses_fence(plane_state),
15135                                          &plane_state->flags);
15136         if (IS_ERR(vma))
15137                 return PTR_ERR(vma);
15138
15139         plane_state->vma = vma;
15140
15141         return 0;
15142 }
15143
15144 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15145 {
15146         struct i915_vma *vma;
15147
15148         vma = fetch_and_zero(&old_plane_state->vma);
15149         if (vma)
15150                 intel_unpin_fb_vma(vma, old_plane_state->flags);
15151 }
15152
/*
 * Raise the scheduling priority of outstanding work on a framebuffer
 * object to the display priority, so rendering destined for scanout
 * completes ahead of ordinary client work.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
        struct i915_sched_attr attr = {
                .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
        };

        i915_gem_object_wait_priority(obj, 0, &attr);
}
15161
15162 /**
15163  * intel_prepare_plane_fb - Prepare fb for usage on plane
15164  * @plane: drm plane to prepare for
15165  * @_new_plane_state: the plane state being prepared
15166  *
15167  * Prepares a framebuffer for usage on a display plane.  Generally this
15168  * involves pinning the underlying object and updating the frontbuffer tracking
15169  * bits.  Some older platforms need special physical address handling for
15170  * cursor planes.
15171  *
15172  * Returns 0 on success, negative error code on failure.
15173  */
15174 int
15175 intel_prepare_plane_fb(struct drm_plane *plane,
15176                        struct drm_plane_state *_new_plane_state)
15177 {
15178         struct intel_plane_state *new_plane_state =
15179                 to_intel_plane_state(_new_plane_state);
15180         struct intel_atomic_state *intel_state =
15181                 to_intel_atomic_state(new_plane_state->uapi.state);
15182         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15183         struct drm_framebuffer *fb = new_plane_state->hw.fb;
15184         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15185         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
15186         int ret;
15187
15188         if (old_obj) {
15189                 struct intel_crtc_state *crtc_state =
15190                         intel_atomic_get_new_crtc_state(intel_state,
15191                                                         to_intel_crtc(plane->state->crtc));
15192
15193                 /* Big Hammer, we also need to ensure that any pending
15194                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15195                  * current scanout is retired before unpinning the old
15196                  * framebuffer. Note that we rely on userspace rendering
15197                  * into the buffer attached to the pipe they are waiting
15198                  * on. If not, userspace generates a GPU hang with IPEHR
15199                  * point to the MI_WAIT_FOR_EVENT.
15200                  *
15201                  * This should only fail upon a hung GPU, in which case we
15202                  * can safely continue.
15203                  */
15204                 if (needs_modeset(crtc_state)) {
15205                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15206                                                               old_obj->base.resv, NULL,
15207                                                               false, 0,
15208                                                               GFP_KERNEL);
15209                         if (ret < 0)
15210                                 return ret;
15211                 }
15212         }
15213
15214         if (new_plane_state->uapi.fence) { /* explicit fencing */
15215                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
15216                                                     new_plane_state->uapi.fence,
15217                                                     I915_FENCE_TIMEOUT,
15218                                                     GFP_KERNEL);
15219                 if (ret < 0)
15220                         return ret;
15221         }
15222
15223         if (!obj)
15224                 return 0;
15225
15226         ret = i915_gem_object_pin_pages(obj);
15227         if (ret)
15228                 return ret;
15229
15230         ret = intel_plane_pin_fb(new_plane_state);
15231
15232         i915_gem_object_unpin_pages(obj);
15233         if (ret)
15234                 return ret;
15235
15236         fb_obj_bump_render_priority(obj);
15237         intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
15238
15239         if (!new_plane_state->uapi.fence) { /* implicit fencing */
15240                 struct dma_fence *fence;
15241
15242                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15243                                                       obj->base.resv, NULL,
15244                                                       false, I915_FENCE_TIMEOUT,
15245                                                       GFP_KERNEL);
15246                 if (ret < 0)
15247                         return ret;
15248
15249                 fence = dma_resv_get_excl_rcu(obj->base.resv);
15250                 if (fence) {
15251                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15252                                                    fence);
15253                         dma_fence_put(fence);
15254                 }
15255         } else {
15256                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15257                                            new_plane_state->uapi.fence);
15258         }
15259
15260         /*
15261          * We declare pageflips to be interactive and so merit a small bias
15262          * towards upclocking to deliver the frame on time. By only changing
15263          * the RPS thresholds to sample more regularly and aim for higher
15264          * clocks we can hopefully deliver low power workloads (like kodi)
15265          * that are not quite steady state without resorting to forcing
15266          * maximum clocks following a vblank miss (see do_rps_boost()).
15267          */
15268         if (!intel_state->rps_interactive) {
15269                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15270                 intel_state->rps_interactive = true;
15271         }
15272
15273         return 0;
15274 }
15275
15276 /**
15277  * intel_cleanup_plane_fb - Cleans up an fb after plane use
15278  * @plane: drm plane to clean up for
15279  * @_old_plane_state: the state from the previous modeset
15280  *
15281  * Cleans up a framebuffer that has just been removed from a plane.
15282  */
15283 void
15284 intel_cleanup_plane_fb(struct drm_plane *plane,
15285                        struct drm_plane_state *_old_plane_state)
15286 {
15287         struct intel_plane_state *old_plane_state =
15288                 to_intel_plane_state(_old_plane_state);
15289         struct intel_atomic_state *intel_state =
15290                 to_intel_atomic_state(old_plane_state->uapi.state);
15291         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15292
15293         if (intel_state->rps_interactive) {
15294                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15295                 intel_state->rps_interactive = false;
15296         }
15297
15298         /* Should only be called after a successful intel_prepare_plane_fb()! */
15299         intel_plane_unpin_fb(old_plane_state);
15300 }
15301
15302 /**
15303  * intel_plane_destroy - destroy a plane
15304  * @plane: plane to destroy
15305  *
15306  * Common destruction function for all types of planes (primary, cursor,
15307  * sprite).
15308  */
15309 void intel_plane_destroy(struct drm_plane *plane)
15310 {
15311         drm_plane_cleanup(plane);
15312         kfree(to_intel_plane(plane));
15313 }
15314
15315 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15316                                             u32 format, u64 modifier)
15317 {
15318         switch (modifier) {
15319         case DRM_FORMAT_MOD_LINEAR:
15320         case I915_FORMAT_MOD_X_TILED:
15321                 break;
15322         default:
15323                 return false;
15324         }
15325
15326         switch (format) {
15327         case DRM_FORMAT_C8:
15328         case DRM_FORMAT_RGB565:
15329         case DRM_FORMAT_XRGB1555:
15330         case DRM_FORMAT_XRGB8888:
15331                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15332                         modifier == I915_FORMAT_MOD_X_TILED;
15333         default:
15334                 return false;
15335         }
15336 }
15337
15338 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15339                                             u32 format, u64 modifier)
15340 {
15341         switch (modifier) {
15342         case DRM_FORMAT_MOD_LINEAR:
15343         case I915_FORMAT_MOD_X_TILED:
15344                 break;
15345         default:
15346                 return false;
15347         }
15348
15349         switch (format) {
15350         case DRM_FORMAT_C8:
15351         case DRM_FORMAT_RGB565:
15352         case DRM_FORMAT_XRGB8888:
15353         case DRM_FORMAT_XBGR8888:
15354         case DRM_FORMAT_ARGB8888:
15355         case DRM_FORMAT_ABGR8888:
15356         case DRM_FORMAT_XRGB2101010:
15357         case DRM_FORMAT_XBGR2101010:
15358         case DRM_FORMAT_ARGB2101010:
15359         case DRM_FORMAT_ABGR2101010:
15360         case DRM_FORMAT_XBGR16161616F:
15361                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15362                         modifier == I915_FORMAT_MOD_X_TILED;
15363         default:
15364                 return false;
15365         }
15366 }
15367
15368 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
15369                                               u32 format, u64 modifier)
15370 {
15371         return modifier == DRM_FORMAT_MOD_LINEAR &&
15372                 format == DRM_FORMAT_ARGB8888;
15373 }
15374
/*
 * drm_plane_funcs for gen4+ primary planes: stock atomic helpers plus
 * the i965 format/modifier validation.
 */
static const struct drm_plane_funcs i965_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = i965_plane_format_mod_supported,
};
15383
/*
 * drm_plane_funcs for gen2/3 primary planes: identical to the i965
 * variant except for the narrower format/modifier validation.
 */
static const struct drm_plane_funcs i8xx_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = i8xx_plane_format_mod_supported,
};
15392
/*
 * Fastpath for the legacy cursor ioctls: update the cursor plane
 * without a full atomic commit (and thus without vblank waits), as long
 * as only the fb and/or position change. Anything that could affect
 * watermarks, or racing commits, falls back to the slowpath via
 * drm_atomic_helper_update_plane().
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
                           struct drm_crtc *_crtc,
                           struct drm_framebuffer *fb,
                           int crtc_x, int crtc_y,
                           unsigned int crtc_w, unsigned int crtc_h,
                           u32 src_x, u32 src_y,
                           u32 src_w, u32 src_h,
                           struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_plane *plane = to_intel_plane(_plane);
        struct intel_crtc *crtc = to_intel_crtc(_crtc);
        struct intel_plane_state *old_plane_state =
                to_intel_plane_state(plane->base.state);
        struct intel_plane_state *new_plane_state;
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_crtc_state *new_crtc_state;
        int ret;

        /*
         * When crtc is inactive or there is a modeset pending,
         * wait for it to complete in the slowpath
         */
        if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
            crtc_state->update_pipe)
                goto slow;

        /*
         * Don't do an async update if there is an outstanding commit modifying
         * the plane.  This prevents our async update's changes from getting
         * overridden by a previous synchronous update's state.
         */
        if (old_plane_state->uapi.commit &&
            !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
                goto slow;

        /*
         * If any parameters change that may affect watermarks,
         * take the slowpath. Only changing fb or position should be
         * in the fastpath.
         */
        if (old_plane_state->uapi.crtc != &crtc->base ||
            old_plane_state->uapi.src_w != src_w ||
            old_plane_state->uapi.src_h != src_h ||
            old_plane_state->uapi.crtc_w != crtc_w ||
            old_plane_state->uapi.crtc_h != crtc_h ||
            !old_plane_state->uapi.fb != !fb)
                goto slow;

        /* Build throwaway plane/crtc states just for the atomic check. */
        new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
        if (!new_plane_state)
                return -ENOMEM;

        new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
        if (!new_crtc_state) {
                ret = -ENOMEM;
                goto out_free;
        }

        drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

        new_plane_state->uapi.src_x = src_x;
        new_plane_state->uapi.src_y = src_y;
        new_plane_state->uapi.src_w = src_w;
        new_plane_state->uapi.src_h = src_h;
        new_plane_state->uapi.crtc_x = crtc_x;
        new_plane_state->uapi.crtc_y = crtc_y;
        new_plane_state->uapi.crtc_w = crtc_w;
        new_plane_state->uapi.crtc_h = crtc_h;

        ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
                                                  old_plane_state, new_plane_state);
        if (ret)
                goto out_free;

        ret = intel_plane_pin_fb(new_plane_state);
        if (ret)
                goto out_free;

        intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
                                ORIGIN_FLIP);
        intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
                                to_intel_frontbuffer(new_plane_state->hw.fb),
                                plane->frontbuffer_bit);

        /* Swap plane state */
        plane->base.state = &new_plane_state->uapi;

        /*
         * We cannot swap crtc_state as it may be in use by an atomic commit or
         * page flip that's running simultaneously. If we swap crtc_state and
         * destroy the old state, we will cause a use-after-free there.
         *
         * Only update active_planes, which is needed for our internal
         * bookkeeping. Either value will do the right thing when updating
         * planes atomically. If the cursor was part of the atomic update then
         * we would have taken the slowpath.
         */
        crtc_state->active_planes = new_crtc_state->active_planes;

        if (new_plane_state->uapi.visible)
                intel_update_plane(plane, crtc_state, new_plane_state);
        else
                intel_disable_plane(plane, crtc_state);

        intel_plane_unpin_fb(old_plane_state);

out_free:
        /* On success the old plane state is freed; on failure the new one. */
        if (new_crtc_state)
                intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
        if (ret)
                intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
        else
                intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
        return ret;

slow:
        return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
                                              crtc_x, crtc_y, crtc_w, crtc_h,
                                              src_x, src_y, src_w, src_h, ctx);
}
15515
/*
 * drm_plane_funcs for cursor planes; update_plane routes through the
 * legacy cursor fastpath instead of the generic atomic helper.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .update_plane = intel_legacy_cursor_update,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = intel_cursor_format_mod_supported,
};
15524
15525 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
15526                                enum i9xx_plane_id i9xx_plane)
15527 {
15528         if (!HAS_FBC(dev_priv))
15529                 return false;
15530
15531         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15532                 return i9xx_plane == PLANE_A; /* tied to pipe A */
15533         else if (IS_IVYBRIDGE(dev_priv))
15534                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
15535                         i9xx_plane == PLANE_C;
15536         else if (INTEL_GEN(dev_priv) >= 4)
15537                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
15538         else
15539                 return i9xx_plane == PLANE_A;
15540 }
15541
/*
 * Allocate and register the primary plane for @pipe, selecting the
 * per-generation format list, plane funcs, vfuncs and properties.
 * Gen9+ is delegated entirely to skl_universal_plane_create().
 * Returns the plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_plane *plane;
        const struct drm_plane_funcs *plane_funcs;
        unsigned int supported_rotations;
        unsigned int possible_crtcs;
        const u32 *formats;
        int num_formats;
        int ret, zpos;

        if (INTEL_GEN(dev_priv) >= 9)
                return skl_universal_plane_create(dev_priv, pipe,
                                                  PLANE_PRIMARY);

        plane = intel_plane_alloc();
        if (IS_ERR(plane))
                return plane;

        plane->pipe = pipe;
        /*
         * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
         * port is hooked to pipe B. Hence we want plane A feeding pipe B.
         */
        if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
                plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
        else
                plane->i9xx_plane = (enum i9xx_plane_id) pipe;
        plane->id = PLANE_PRIMARY;
        plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

        /* Tell FBC which frontbuffer bits it may respond to. */
        plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
        if (plane->has_fbc) {
                struct intel_fbc *fbc = &dev_priv->fbc;

                fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                formats = vlv_primary_formats;
                num_formats = ARRAY_SIZE(vlv_primary_formats);
        } else if (INTEL_GEN(dev_priv) >= 4) {
                /*
                 * WaFP16GammaEnabling:ivb
                 * "Workaround : When using the 64-bit format, the plane
                 *  output on each color channel has one quarter amplitude.
                 *  It can be brought up to full amplitude by using pipe
                 *  gamma correction or pipe color space conversion to
                 *  multiply the plane output by four."
                 *
                 * There is no dedicated plane gamma for the primary plane,
                 * and using the pipe gamma/csc could conflict with other
                 * planes, so we choose not to expose fp16 on IVB primary
                 * planes. HSW primary planes no longer have this problem.
                 */
                if (IS_IVYBRIDGE(dev_priv)) {
                        formats = ivb_primary_formats;
                        num_formats = ARRAY_SIZE(ivb_primary_formats);
                } else {
                        formats = i965_primary_formats;
                        num_formats = ARRAY_SIZE(i965_primary_formats);
                }
        } else {
                formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
        }

        if (INTEL_GEN(dev_priv) >= 4)
                plane_funcs = &i965_plane_funcs;
        else
                plane_funcs = &i8xx_plane_funcs;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                plane->min_cdclk = vlv_plane_min_cdclk;
        else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                plane->min_cdclk = hsw_plane_min_cdclk;
        else if (IS_IVYBRIDGE(dev_priv))
                plane->min_cdclk = ivb_plane_min_cdclk;
        else
                plane->min_cdclk = i9xx_plane_min_cdclk;

        plane->max_stride = i9xx_plane_max_stride;
        plane->update_plane = i9xx_update_plane;
        plane->disable_plane = i9xx_disable_plane;
        plane->get_hw_state = i9xx_plane_get_hw_state;
        plane->check_plane = i9xx_plane_check;

        possible_crtcs = BIT(pipe);

        /* Name "primary <pipe>" where plane == pipe, "plane <plane>" otherwise. */
        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
                                               possible_crtcs, plane_funcs,
                                               formats, num_formats,
                                               i9xx_format_modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "primary %c", pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
                                               possible_crtcs, plane_funcs,
                                               formats, num_formats,
                                               i9xx_format_modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane %c",
                                               plane_name(plane->i9xx_plane));
        if (ret)
                goto fail;

        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
                        DRM_MODE_REFLECT_X;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
        } else {
                supported_rotations = DRM_MODE_ROTATE_0;
        }

        if (INTEL_GEN(dev_priv) >= 4)
                drm_plane_create_rotation_property(&plane->base,
                                                   DRM_MODE_ROTATE_0,
                                                   supported_rotations);

        /* Primary plane sits at the bottom of the zpos stack. */
        zpos = 0;
        drm_plane_create_zpos_immutable_property(&plane->base, zpos);

        drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

        return plane;

fail:
        intel_plane_free(plane);

        return ERR_PTR(ret);
}
15677
/*
 * Allocate and register the cursor plane for @pipe. i845/i865 get their
 * dedicated cursor vfuncs; everything else uses the i9xx variants.
 * Returns the plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
                          enum pipe pipe)
{
        unsigned int possible_crtcs;
        struct intel_plane *cursor;
        int ret, zpos;

        cursor = intel_plane_alloc();
        if (IS_ERR(cursor))
                return cursor;

        cursor->pipe = pipe;
        cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
        cursor->id = PLANE_CURSOR;
        cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

        if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
                cursor->max_stride = i845_cursor_max_stride;
                cursor->update_plane = i845_update_cursor;
                cursor->disable_plane = i845_disable_cursor;
                cursor->get_hw_state = i845_cursor_get_hw_state;
                cursor->check_plane = i845_check_cursor;
        } else {
                cursor->max_stride = i9xx_cursor_max_stride;
                cursor->update_plane = i9xx_update_cursor;
                cursor->disable_plane = i9xx_disable_cursor;
                cursor->get_hw_state = i9xx_cursor_get_hw_state;
                cursor->check_plane = i9xx_check_cursor;
        }

        /*
         * ~0 sentinels for the cached register values; presumably forces
         * the first update to reprogram the hardware — TODO confirm
         * against the update/disable implementations.
         */
        cursor->cursor.base = ~0;
        cursor->cursor.cntl = ~0;

        if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
                cursor->cursor.size = ~0;

        possible_crtcs = BIT(pipe);

        ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
                                       possible_crtcs, &intel_cursor_plane_funcs,
                                       intel_cursor_formats,
                                       ARRAY_SIZE(intel_cursor_formats),
                                       cursor_format_modifiers,
                                       DRM_PLANE_TYPE_CURSOR,
                                       "cursor %c", pipe_name(pipe));
        if (ret)
                goto fail;

        if (INTEL_GEN(dev_priv) >= 4)
                drm_plane_create_rotation_property(&cursor->base,
                                                   DRM_MODE_ROTATE_0,
                                                   DRM_MODE_ROTATE_0 |
                                                   DRM_MODE_ROTATE_180);

        /* Cursor goes above all sprites in the zpos stack. */
        zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
        drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

        drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

        return cursor;

fail:
        intel_plane_free(cursor);

        return ERR_PTR(ret);
}
15745
15746 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
15747                                     struct intel_crtc_state *crtc_state)
15748 {
15749         struct intel_crtc_scaler_state *scaler_state =
15750                 &crtc_state->scaler_state;
15751         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15752         int i;
15753
15754         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
15755         if (!crtc->num_scalers)
15756                 return;
15757
15758         for (i = 0; i < crtc->num_scalers; i++) {
15759                 struct intel_scaler *scaler = &scaler_state->scalers[i];
15760
15761                 scaler->in_use = 0;
15762                 scaler->mode = 0;
15763         }
15764
15765         scaler_state->scaler_id = -1;
15766 }
15767
/*
 * drm_crtc_funcs hooks shared by every platform variant below; the
 * per-platform vtables only differ in their vblank related hooks.
 */
#define INTEL_CRTC_FUNCS \
        .gamma_set = drm_atomic_helper_legacy_gamma_set, \
        .set_config = drm_atomic_helper_set_config, \
        .destroy = intel_crtc_destroy, \
        .page_flip = drm_atomic_helper_page_flip, \
        .atomic_duplicate_state = intel_crtc_duplicate_state, \
        .atomic_destroy_state = intel_crtc_destroy_state, \
        .set_crc_source = intel_crtc_set_crc_source, \
        .verify_crc_source = intel_crtc_verify_crc_source, \
        .get_crc_sources = intel_crtc_get_crc_sources

/* BDW+: g4x-style hw frame counter, bdw vblank enable/disable */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = g4x_get_vblank_counter,
        .enable_vblank = bdw_enable_vblank,
        .disable_vblank = bdw_disable_vblank,
};

/* ILK-HSW: g4x-style hw frame counter, ilk vblank enable/disable */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = g4x_get_vblank_counter,
        .enable_vblank = ilk_enable_vblank,
        .disable_vblank = ilk_disable_vblank,
};

/* G4X/VLV/CHV: g4x hw frame counter, i965 vblank enable/disable */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = g4x_get_vblank_counter,
        .enable_vblank = i965_enable_vblank,
        .disable_vblank = i965_disable_vblank,
};

/* gen4 (i965): i915-style hw frame counter, i965 vblank hooks */
static const struct drm_crtc_funcs i965_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = i915_get_vblank_counter,
        .enable_vblank = i965_enable_vblank,
        .disable_vblank = i965_disable_vblank,
};

/* i915GM/i945GM: i915 hw frame counter, i915gm-specific vblank hooks */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = i915_get_vblank_counter,
        .enable_vblank = i915gm_enable_vblank,
        .disable_vblank = i915gm_disable_vblank,
};

/* other gen3: i915 hw frame counter, i8xx vblank hooks */
static const struct drm_crtc_funcs i915_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        .get_vblank_counter = i915_get_vblank_counter,
        .enable_vblank = i8xx_enable_vblank,
        .disable_vblank = i8xx_disable_vblank,
};

/* gen2 */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
        INTEL_CRTC_FUNCS,

        /* no hw vblank counter */
        .enable_vblank = i8xx_enable_vblank,
        .disable_vblank = i8xx_disable_vblank,
};
15834
/*
 * intel_crtc_init - allocate and register the crtc for a hw pipe
 * @dev_priv: i915 device
 * @pipe: hw pipe the new crtc will drive
 *
 * Creates the crtc's software state, its primary, sprite and cursor
 * planes, picks the platform specific drm_crtc_funcs variant and
 * registers everything with the DRM core.
 *
 * Returns 0 on success or a negative error code. Planes that were
 * already registered before a failure are freed later by
 * drm_mode_config_cleanup() (see the fail label below).
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        const struct drm_crtc_funcs *funcs;
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *crtc_state = NULL;
        struct intel_plane *primary = NULL;
        struct intel_plane *cursor = NULL;
        int sprite, ret;

        intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
        if (!intel_crtc)
                return -ENOMEM;

        crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        if (!crtc_state) {
                ret = -ENOMEM;
                goto fail;
        }
        /* install crtc_state->uapi as the crtc's initial atomic state */
        __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->uapi);
        intel_crtc->config = crtc_state;

        primary = intel_primary_plane_create(dev_priv, pipe);
        if (IS_ERR(primary)) {
                ret = PTR_ERR(primary);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(primary->id);

        for_each_sprite(dev_priv, pipe, sprite) {
                struct intel_plane *plane;

                plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                intel_crtc->plane_ids_mask |= BIT(plane->id);
        }

        cursor = intel_cursor_plane_create(dev_priv, pipe);
        if (IS_ERR(cursor)) {
                ret = PTR_ERR(cursor);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(cursor->id);

        /* pick the vblank hook variant matching the platform */
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv) ||
                    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
                        funcs = &g4x_crtc_funcs;
                else if (IS_GEN(dev_priv, 4))
                        funcs = &i965_crtc_funcs;
                else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
                        funcs = &i915gm_crtc_funcs;
                else if (IS_GEN(dev_priv, 3))
                        funcs = &i915_crtc_funcs;
                else
                        funcs = &i8xx_crtc_funcs;
        } else {
                if (INTEL_GEN(dev_priv) >= 8)
                        funcs = &bdw_crtc_funcs;
                else
                        funcs = &ilk_crtc_funcs;
        }

        ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
                                        &primary->base, &cursor->base,
                                        funcs, "pipe %c", pipe_name(pipe));
        if (ret)
                goto fail;

        intel_crtc->pipe = pipe;

        /* initialize shared scalers */
        intel_crtc_init_scalers(intel_crtc, crtc_state);

        /* each pipe maps to exactly one crtc */
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
               dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
        dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

        if (INTEL_GEN(dev_priv) < 9) {
                enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

                /* pre-gen9 also tracks the primary plane -> crtc mapping */
                BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
                       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
                dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
        }

        intel_color_init(intel_crtc);

        /* the rest of the driver assumes crtc index == pipe */
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

        return 0;

fail:
        /*
         * drm_mode_config_cleanup() will free up any
         * crtcs/planes already initialized.
         */
        kfree(crtc_state);
        kfree(intel_crtc);

        return ret;
}
15939
15940 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15941                                       struct drm_file *file)
15942 {
15943         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15944         struct drm_crtc *drmmode_crtc;
15945         struct intel_crtc *crtc;
15946
15947         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15948         if (!drmmode_crtc)
15949                 return -ENOENT;
15950
15951         crtc = to_intel_crtc(drmmode_crtc);
15952         pipe_from_crtc_id->pipe = crtc->pipe;
15953
15954         return 0;
15955 }
15956
15957 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
15958 {
15959         struct drm_device *dev = encoder->base.dev;
15960         struct intel_encoder *source_encoder;
15961         u32 possible_clones = 0;
15962
15963         for_each_intel_encoder(dev, source_encoder) {
15964                 if (encoders_cloneable(encoder, source_encoder))
15965                         possible_clones |= drm_encoder_mask(&source_encoder->base);
15966         }
15967
15968         return possible_clones;
15969 }
15970
15971 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
15972 {
15973         struct drm_device *dev = encoder->base.dev;
15974         struct intel_crtc *crtc;
15975         u32 possible_crtcs = 0;
15976
15977         for_each_intel_crtc(dev, crtc) {
15978                 if (encoder->pipe_mask & BIT(crtc->pipe))
15979                         possible_crtcs |= drm_crtc_mask(&crtc->base);
15980         }
15981
15982         return possible_crtcs;
15983 }
15984
15985 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
15986 {
15987         if (!IS_MOBILE(dev_priv))
15988                 return false;
15989
15990         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
15991                 return false;
15992
15993         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
15994                 return false;
15995
15996         return true;
15997 }
15998
15999 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
16000 {
16001         if (INTEL_GEN(dev_priv) >= 9)
16002                 return false;
16003
16004         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
16005                 return false;
16006
16007         if (HAS_PCH_LPT_H(dev_priv) &&
16008             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
16009                 return false;
16010
16011         /* DDI E can't be used if DDI A requires 4 lanes */
16012         if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
16013                 return false;
16014
16015         if (!dev_priv->vbt.int_crt_support)
16016                 return false;
16017
16018         return true;
16019 }
16020
16021 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16022 {
16023         int pps_num;
16024         int pps_idx;
16025
16026         if (HAS_DDI(dev_priv))
16027                 return;
16028         /*
16029          * This w/a is needed at least on CPT/PPT, but to be sure apply it
16030          * everywhere where registers can be write protected.
16031          */
16032         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16033                 pps_num = 2;
16034         else
16035                 pps_num = 1;
16036
16037         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16038                 u32 val = I915_READ(PP_CONTROL(pps_idx));
16039
16040                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16041                 I915_WRITE(PP_CONTROL(pps_idx), val);
16042         }
16043 }
16044
16045 static void intel_pps_init(struct drm_i915_private *dev_priv)
16046 {
16047         if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
16048                 dev_priv->pps_mmio_base = PCH_PPS_BASE;
16049         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16050                 dev_priv->pps_mmio_base = VLV_PPS_BASE;
16051         else
16052                 dev_priv->pps_mmio_base = PPS_BASE;
16053
16054         intel_pps_unlock_regs_wa(dev_priv);
16055 }
16056
/*
 * intel_setup_outputs - probe and register all display outputs
 * @dev_priv: i915 device
 *
 * Walks the platform specific set of possible output ports, consulting
 * strap registers and/or VBT hints to decide which encoders to
 * register. Afterwards each encoder's possible_crtcs/possible_clones
 * masks are filled in and connectors are reordered for the fbdev
 * helper. The registration order within each platform branch is
 * deliberate (see the PCH split comment about LVDS vs eDP).
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;

        intel_pps_init(dev_priv);

        if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
                return;

        if (INTEL_GEN(dev_priv) >= 12) {
                /* NOTE: port C is skipped on gen12 here */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
                intel_ddi_init(dev_priv, PORT_G);
                intel_ddi_init(dev_priv, PORT_H);
                intel_ddi_init(dev_priv, PORT_I);
                icl_dsi_init(dev_priv);
        } else if (IS_ELKHARTLAKE(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                icl_dsi_init(dev_priv);
        } else if (IS_GEN(dev_priv, 11)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                /*
                 * On some ICL SKUs port F is not present. No strap bits for
                 * this, so rely on VBT.
                 * Work around broken VBTs on SKUs known to have no port F.
                 */
                if (IS_ICL_WITH_PORT_F(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_F))
                        intel_ddi_init(dev_priv, PORT_F);

                icl_dsi_init(dev_priv);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
                 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
                 * detect the ports.
                 */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);

                vlv_dsi_init(dev_priv);
        } else if (HAS_DDI(dev_priv)) {
                int found;

                if (intel_ddi_crt_present(dev_priv))
                        intel_crt_init(dev_priv);

                /*
                 * Haswell uses DDI functions to detect digital outputs.
                 * On SKL pre-D0 the strap isn't connected, so we assume
                 * it's there.
                 */
                found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
                /* WaIgnoreDDIAStrap: skl */
                if (found || IS_GEN9_BC(dev_priv))
                        intel_ddi_init(dev_priv, PORT_A);

                /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
                 * register */
                found = I915_READ(SFUSE_STRAP);

                if (found & SFUSE_STRAP_DDIB_DETECTED)
                        intel_ddi_init(dev_priv, PORT_B);
                if (found & SFUSE_STRAP_DDIC_DETECTED)
                        intel_ddi_init(dev_priv, PORT_C);
                if (found & SFUSE_STRAP_DDID_DETECTED)
                        intel_ddi_init(dev_priv, PORT_D);
                if (found & SFUSE_STRAP_DDIF_DETECTED)
                        intel_ddi_init(dev_priv, PORT_F);
                /*
                 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
                 */
                if (IS_GEN9_BC(dev_priv) &&
                    intel_bios_is_port_present(dev_priv, PORT_E))
                        intel_ddi_init(dev_priv, PORT_E);

        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;

                /*
                 * intel_edp_init_connector() depends on this completing first,
                 * to prevent the registration of both eDP and LVDS and the
                 * incorrect sharing of the PPS.
                 */
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);

                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

                if (ilk_has_edp_a(dev_priv))
                        intel_dp_init(dev_priv, DP_A, PORT_A);

                if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
                        if (!found)
                                intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
                }

                if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

                if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                bool has_edp, has_port;

                if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
                        intel_crt_init(dev_priv);

                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
                 * eDP ports may have been muxed to an alternate function.
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
                 *
                 * Sadly the straps seem to be missing sometimes even for HDMI
                 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
                 * and VBT for the presence of the port. Additionally we can't
                 * trust the port type the VBT declares as we've seen at least
                 * HDMI ports that the VBT claim are DP or eDP.
                 */
                has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
                has_port = intel_bios_is_port_present(dev_priv, PORT_B);
                if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
                if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

                has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
                has_port = intel_bios_is_port_present(dev_priv, PORT_C);
                if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
                if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

                if (IS_CHERRYVIEW(dev_priv)) {
                        /*
                         * eDP not supported on port D,
                         * so no need to worry about it
                         */
                        has_port = intel_bios_is_port_present(dev_priv, PORT_D);
                        if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
                                intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
                        if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
                                intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
                }

                vlv_dsi_init(dev_priv);
        } else if (IS_PINEVIEW(dev_priv)) {
                intel_lvds_init(dev_priv);
                intel_crt_init(dev_priv);
        } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                bool found = false;

                if (IS_MOBILE(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
                        if (!found && IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
                        }

                        if (!found && IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_B, PORT_B);
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
                }

                if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

                        if (IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
                        }
                        if (IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_C, PORT_C);
                }

                if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev_priv, DP_D, PORT_D);

                if (SUPPORTS_TV(dev_priv))
                        intel_tv_init(dev_priv);
        } else if (IS_GEN(dev_priv, 2)) {
                if (IS_I85X(dev_priv))
                        intel_lvds_init(dev_priv);

                intel_crt_init(dev_priv);
                intel_dvo_init(dev_priv);
        }

        intel_psr_init(dev_priv);

        /* now that all encoders exist, compute their crtc/clone masks */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                encoder->base.possible_crtcs =
                        intel_encoder_possible_crtcs(encoder);
                encoder->base.possible_clones =
                        intel_encoder_possible_clones(encoder);
        }

        intel_init_pch_refclk(dev_priv);

        drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
16295
16296 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16297 {
16298         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16299
16300         drm_framebuffer_cleanup(fb);
16301         intel_frontbuffer_put(intel_fb->frontbuffer);
16302
16303         kfree(intel_fb);
16304 }
16305
16306 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16307                                                 struct drm_file *file,
16308                                                 unsigned int *handle)
16309 {
16310         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16311
16312         if (obj->userptr.mm) {
16313                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
16314                 return -EINVAL;
16315         }
16316
16317         return drm_gem_handle_create(file, &obj->base, handle);
16318 }
16319
16320 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
16321                                         struct drm_file *file,
16322                                         unsigned flags, unsigned color,
16323                                         struct drm_clip_rect *clips,
16324                                         unsigned num_clips)
16325 {
16326         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16327
16328         i915_gem_object_flush_if_display(obj);
16329         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
16330
16331         return 0;
16332 }
16333
/* drm_framebuffer_funcs vtable for userspace-created framebuffers */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
        .destroy = intel_user_framebuffer_destroy,
        .create_handle = intel_user_framebuffer_create_handle,
        .dirty = intel_user_framebuffer_dirty,
};
16339
/*
 * intel_framebuffer_init - validate addfb parameters and init an fb
 * @intel_fb: zeroed framebuffer wrapper to fill in
 * @obj: GEM object backing every plane of the fb
 * @mode_cmd: addfb(2) parameters from userspace; modifier[0] may be
 *            fixed up in place for legacy (non-MODIFIERS) addfb
 *
 * Checks the tiling/modifier/format/stride/offset combination against
 * the platform limits and, if everything is consistent, registers the
 * framebuffer with the DRM core.
 *
 * Returns 0 on success or a negative error code; on failure the
 * frontbuffer reference taken here is dropped again.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_framebuffer *fb = &intel_fb->base;
        u32 max_stride;
        unsigned int tiling, stride;
        int ret = -EINVAL;
        int i;

        intel_fb->frontbuffer = intel_frontbuffer_get(obj);
        if (!intel_fb->frontbuffer)
                return -ENOMEM;

        /* snapshot the object's tiling parameters under its lock */
        i915_gem_object_lock(obj);
        tiling = i915_gem_object_get_tiling(obj);
        stride = i915_gem_object_get_stride(obj);
        i915_gem_object_unlock(obj);

        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
                /*
                 * If there's a fence, enforce that
                 * the fb modifier and tiling mode match.
                 */
                if (tiling != I915_TILING_NONE &&
                    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
                        goto err;
                }
        } else {
                /* legacy addfb: derive the modifier from the object tiling */
                if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
                } else if (tiling == I915_TILING_Y) {
                        DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
                        goto err;
                }
        }

        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                struct drm_format_name_buf format_name;

                DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
                              drm_get_format_name(mode_cmd->pixel_format,
                                                  &format_name),
                              mode_cmd->modifier[0]);
                goto err;
        }

        /*
         * gen2/3 display engine uses the fence if present,
         * so the tiling mode must match the fb modifier exactly.
         */
        if (INTEL_GEN(dev_priv) < 4 &&
            tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
                goto err;
        }

        max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
                                         mode_cmd->modifier[0]);
        if (mode_cmd->pitches[0] > max_stride) {
                DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
                              mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
                              "tiled" : "linear",
                              mode_cmd->pitches[0], max_stride);
                goto err;
        }

        /*
         * If there's a fence, enforce that
         * the fb pitch and fence stride match.
         */
        if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
                DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
                              mode_cmd->pitches[0], stride);
                goto err;
        }

        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0)
                goto err;

        drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

        for (i = 0; i < fb->format->num_planes; i++) {
                u32 stride_alignment;

                /* all planes must share the same backing object */
                if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
                        DRM_DEBUG_KMS("bad plane %d handle\n", i);
                        goto err;
                }

                stride_alignment = intel_fb_stride_alignment(fb, i);

                /*
                 * Display WA #0531: skl,bxt,kbl,glk
                 *
                 * Render decompression and plane width > 3840
                 * combined with horizontal panning requires the
                 * plane stride to be a multiple of 4. We'll just
                 * require the entire fb to accommodate that to avoid
                 * potential runtime errors at plane configuration time.
                 */
                if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
                    is_ccs_modifier(fb->modifier))
                        stride_alignment *= 4;

                if (fb->pitches[i] & (stride_alignment - 1)) {
                        DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
                                      i, fb->pitches[i], stride_alignment);
                        goto err;
                }

                fb->obj[i] = &obj->base;
        }

        ret = intel_fill_fb_info(dev_priv, fb);
        if (ret)
                goto err;

        ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                goto err;
        }

        return 0;

err:
        intel_frontbuffer_put(intel_fb->frontbuffer);
        return ret;
}
16475
16476 static struct drm_framebuffer *
16477 intel_user_framebuffer_create(struct drm_device *dev,
16478                               struct drm_file *filp,
16479                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
16480 {
16481         struct drm_framebuffer *fb;
16482         struct drm_i915_gem_object *obj;
16483         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
16484
16485         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
16486         if (!obj)
16487                 return ERR_PTR(-ENOENT);
16488
16489         fb = intel_framebuffer_create(obj, &mode_cmd);
16490         i915_gem_object_put(obj);
16491
16492         return fb;
16493 }
16494
16495 static void intel_atomic_state_free(struct drm_atomic_state *state)
16496 {
16497         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
16498
16499         drm_atomic_state_default_release(state);
16500
16501         i915_sw_fence_fini(&intel_state->commit_ready);
16502
16503         kfree(state);
16504 }
16505
16506 static enum drm_mode_status
16507 intel_mode_valid(struct drm_device *dev,
16508                  const struct drm_display_mode *mode)
16509 {
16510         struct drm_i915_private *dev_priv = to_i915(dev);
16511         int hdisplay_max, htotal_max;
16512         int vdisplay_max, vtotal_max;
16513
16514         /*
16515          * Can't reject DBLSCAN here because Xorg ddxen can add piles
16516          * of DBLSCAN modes to the output's mode list when they detect
16517          * the scaling mode property on the connector. And they don't
16518          * ask the kernel to validate those modes in any way until
16519          * modeset time at which point the client gets a protocol error.
16520          * So in order to not upset those clients we silently ignore the
16521          * DBLSCAN flag on such connectors. For other connectors we will
16522          * reject modes with the DBLSCAN flag in encoder->compute_config().
16523          * And we always reject DBLSCAN modes in connector->mode_valid()
16524          * as we never want such modes on the connector's mode list.
16525          */
16526
16527         if (mode->vscan > 1)
16528                 return MODE_NO_VSCAN;
16529
16530         if (mode->flags & DRM_MODE_FLAG_HSKEW)
16531                 return MODE_H_ILLEGAL;
16532
16533         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
16534                            DRM_MODE_FLAG_NCSYNC |
16535                            DRM_MODE_FLAG_PCSYNC))
16536                 return MODE_HSYNC;
16537
16538         if (mode->flags & (DRM_MODE_FLAG_BCAST |
16539                            DRM_MODE_FLAG_PIXMUX |
16540                            DRM_MODE_FLAG_CLKDIV2))
16541                 return MODE_BAD;
16542
16543         /* Transcoder timing limits */
16544         if (INTEL_GEN(dev_priv) >= 11) {
16545                 hdisplay_max = 16384;
16546                 vdisplay_max = 8192;
16547                 htotal_max = 16384;
16548                 vtotal_max = 8192;
16549         } else if (INTEL_GEN(dev_priv) >= 9 ||
16550                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
16551                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
16552                 vdisplay_max = 4096;
16553                 htotal_max = 8192;
16554                 vtotal_max = 8192;
16555         } else if (INTEL_GEN(dev_priv) >= 3) {
16556                 hdisplay_max = 4096;
16557                 vdisplay_max = 4096;
16558                 htotal_max = 8192;
16559                 vtotal_max = 8192;
16560         } else {
16561                 hdisplay_max = 2048;
16562                 vdisplay_max = 2048;
16563                 htotal_max = 4096;
16564                 vtotal_max = 4096;
16565         }
16566
16567         if (mode->hdisplay > hdisplay_max ||
16568             mode->hsync_start > htotal_max ||
16569             mode->hsync_end > htotal_max ||
16570             mode->htotal > htotal_max)
16571                 return MODE_H_ILLEGAL;
16572
16573         if (mode->vdisplay > vdisplay_max ||
16574             mode->vsync_start > vtotal_max ||
16575             mode->vsync_end > vtotal_max ||
16576             mode->vtotal > vtotal_max)
16577                 return MODE_V_ILLEGAL;
16578
16579         if (INTEL_GEN(dev_priv) >= 5) {
16580                 if (mode->hdisplay < 64 ||
16581                     mode->htotal - mode->hdisplay < 32)
16582                         return MODE_H_ILLEGAL;
16583
16584                 if (mode->vtotal - mode->vdisplay < 5)
16585                         return MODE_V_ILLEGAL;
16586         } else {
16587                 if (mode->htotal - mode->hdisplay < 32)
16588                         return MODE_H_ILLEGAL;
16589
16590                 if (mode->vtotal - mode->vdisplay < 3)
16591                         return MODE_V_ILLEGAL;
16592         }
16593
16594         return MODE_OK;
16595 }
16596
16597 enum drm_mode_status
16598 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16599                                 const struct drm_display_mode *mode)
16600 {
16601         int plane_width_max, plane_height_max;
16602
16603         /*
16604          * intel_mode_valid() should be
16605          * sufficient on older platforms.
16606          */
16607         if (INTEL_GEN(dev_priv) < 9)
16608                 return MODE_OK;
16609
16610         /*
16611          * Most people will probably want a fullscreen
16612          * plane so let's not advertize modes that are
16613          * too big for that.
16614          */
16615         if (INTEL_GEN(dev_priv) >= 11) {
16616                 plane_width_max = 5120;
16617                 plane_height_max = 4320;
16618         } else {
16619                 plane_width_max = 5120;
16620                 plane_height_max = 4096;
16621         }
16622
16623         if (mode->hdisplay > plane_width_max)
16624                 return MODE_H_ILLEGAL;
16625
16626         if (mode->vdisplay > plane_height_max)
16627                 return MODE_V_ILLEGAL;
16628
16629         return MODE_OK;
16630 }
16631
/*
 * Mode-config vtable: routes DRM core framebuffer creation, mode
 * validation and atomic state handling to the i915 implementations.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .get_format_info = intel_get_format_info,
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .mode_valid = intel_mode_valid,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
        /* i915 embeds its own state in drm_atomic_state, hence the
         * custom alloc/clear/free hooks. */
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
        .atomic_state_free = intel_atomic_state_free,
};
16643
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Fills in the per-platform function pointers in @dev_priv->display:
 * pipe config readout, initial plane readout, clock computation and
 * crtc enable/disable, plus FDI link training and modeset commit.
 * The if/else ladder is ordered from newest/most specific platform
 * check to the generic fallbacks, so branch order is significant.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
        intel_init_cdclk_hooks(dev_priv);

        /* gen9+ */
        if (INTEL_GEN(dev_priv) >= 9) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        skylake_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock =
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
        /* pre-gen9 DDI platforms: haswell-style hooks, i9xx plane readout */
        } else if (HAS_DDI(dev_priv)) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock =
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
        /* PCH-split platforms without DDI */
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock =
                        ironlake_crtc_compute_clock;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
        /* CHV/VLV differ only in the clock computation hook */
        } else if (IS_CHERRYVIEW(dev_priv)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        } else if (IS_G4X(dev_priv)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        } else if (IS_PINEVIEW(dev_priv)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        /* remaining non-gen2 platforms */
        } else if (!IS_GEN(dev_priv, 2)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        /* gen2 */
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        }

        /* FDI link training: only set on the platforms that have FDI. */
        if (IS_GEN(dev_priv, 5)) {
                dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
        } else if (IS_GEN(dev_priv, 6)) {
                dev_priv->display.fdi_link_train = gen6_fdi_link_train;
        } else if (IS_IVYBRIDGE(dev_priv)) {
                /* FIXME: detect B0+ stepping and use auto training */
                dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                dev_priv->display.fdi_link_train = hsw_fdi_link_train;
        }

        /* gen9+ gets its own modeset-enable sequencing. */
        if (INTEL_GEN(dev_priv) >= 9)
                dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
        else
                dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;

}
16737
/**
 * intel_modeset_init_hw - seed the cdclk software state
 * @i915: device private
 *
 * Updates the cached cdclk hardware state and initializes both the
 * logical and the actual software cdclk state from it.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
        intel_update_cdclk(i915);
        intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
        /* Start out with software state mirroring the hardware state. */
        i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
16744
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state;
        struct intel_atomic_state *intel_state;
        struct intel_crtc *crtc;
        struct intel_crtc_state *crtc_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
        int i;

        /* Only supported on platforms that use atomic watermark design */
        if (!dev_priv->display.optimize_watermarks)
                return;

        /*
         * We need to hold connection_mutex before calling duplicate_state so
         * that the connector loop is protected.
         */
        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock_all_ctx(dev, &ctx);
        if (ret == -EDEADLK) {
                /* Lock contention: drop everything and take it all again. */
                drm_modeset_backoff(&ctx);
                goto retry;
        } else if (WARN_ON(ret)) {
                goto fail;
        }

        state = drm_atomic_helper_duplicate_state(dev, &ctx);
        if (WARN_ON(IS_ERR(state)))
                goto fail;

        intel_state = to_intel_atomic_state(state);

        /*
         * Hardware readout is the only time we don't want to calculate
         * intermediate watermarks (since we don't trust the current
         * watermarks).
         */
        if (!HAS_GMCH(dev_priv))
                intel_state->skip_intermediate_wm = true;

        ret = intel_atomic_check(dev, state);
        if (ret) {
                /*
                 * If we fail here, it means that the hardware appears to be
                 * programmed in a way that shouldn't be possible, given our
                 * understanding of watermark requirements.  This might mean a
                 * mistake in the hardware readout code or a mistake in the
                 * watermark calculations for a given platform.  Raise a WARN
                 * so that this is noticeable.
                 *
                 * If this actually happens, we'll have to just leave the
                 * BIOS-programmed watermarks untouched and hope for the best.
                 */
                WARN(true, "Could not determine valid watermarks for inherited state\n");
                goto put_state;
        }

        /* Write calculated watermark values back */
        for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
                crtc_state->wm.need_postvbl_update = true;
                dev_priv->display.optimize_watermarks(intel_state, crtc_state);

                /* Keep the committed crtc state in sync with the new WMs. */
                to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
        }

put_state:
        drm_atomic_state_put(state);
fail:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
}
16829
16830 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16831 {
16832         if (IS_GEN(dev_priv, 5)) {
16833                 u32 fdi_pll_clk =
16834                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16835
16836                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16837         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16838                 dev_priv->fdi_pll_freq = 270000;
16839         } else {
16840                 return;
16841         }
16842
16843         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16844 }
16845
/*
 * Perform an initial atomic commit covering every crtc so that all
 * active planes get fully computed software state at probe time
 * (see the comment at the call site in intel_modeset_init()).
 */
static int intel_initial_commit(struct drm_device *dev)
{
        struct drm_atomic_state *state = NULL;
        struct drm_modeset_acquire_ctx ctx;
        struct intel_crtc *crtc;
        int ret = 0;

        state = drm_atomic_state_alloc(dev);
        if (!state)
                return -ENOMEM;

        drm_modeset_acquire_init(&ctx, 0);

retry:
        state->acquire_ctx = &ctx;

        /* Pull every crtc (and, if active, its planes) into the state. */
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        intel_atomic_get_crtc_state(state, crtc);

                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        goto out;
                }

                if (crtc_state->hw.active) {
                        ret = drm_atomic_add_affected_planes(state, &crtc->base);
                        if (ret)
                                goto out;

                        /*
                         * FIXME hack to force a LUT update to avoid the
                         * plane update forcing the pipe gamma on without
                         * having a proper LUT loaded. Remove once we
                         * have readout for pipe gamma enable.
                         */
                        crtc_state->uapi.color_mgmt_changed = true;
                }
        }

        ret = drm_atomic_commit(state);

out:
        /* On lock contention, clear the state and start over. */
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        drm_atomic_state_put(state);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}
16902
16903 static void intel_mode_config_init(struct drm_i915_private *i915)
16904 {
16905         struct drm_mode_config *mode_config = &i915->drm.mode_config;
16906
16907         drm_mode_config_init(&i915->drm);
16908
16909         mode_config->min_width = 0;
16910         mode_config->min_height = 0;
16911
16912         mode_config->preferred_depth = 24;
16913         mode_config->prefer_shadow = 1;
16914
16915         mode_config->allow_fb_modifiers = true;
16916
16917         mode_config->funcs = &intel_mode_funcs;
16918
16919         /*
16920          * Maximum framebuffer dimensions, chosen to match
16921          * the maximum render engine surface size on gen4+.
16922          */
16923         if (INTEL_GEN(i915) >= 7) {
16924                 mode_config->max_width = 16384;
16925                 mode_config->max_height = 16384;
16926         } else if (INTEL_GEN(i915) >= 4) {
16927                 mode_config->max_width = 8192;
16928                 mode_config->max_height = 8192;
16929         } else if (IS_GEN(i915, 3)) {
16930                 mode_config->max_width = 4096;
16931                 mode_config->max_height = 4096;
16932         } else {
16933                 mode_config->max_width = 2048;
16934                 mode_config->max_height = 2048;
16935         }
16936
16937         if (IS_I845G(i915) || IS_I865G(i915)) {
16938                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
16939                 mode_config->cursor_height = 1023;
16940         } else if (IS_GEN(i915, 2)) {
16941                 mode_config->cursor_width = 64;
16942                 mode_config->cursor_height = 64;
16943         } else {
16944                 mode_config->cursor_width = 256;
16945                 mode_config->cursor_height = 256;
16946         }
16947 }
16948
/**
 * intel_modeset_init - one-time display initialization
 * @i915: device private
 *
 * Sets up the modeset workqueues and mode config, initializes
 * bandwidth/quirk/FBC/PM/GMBUS state, creates a crtc per pipe,
 * reads out the BIOS-programmed hardware state and performs an
 * initial commit to bring software state in sync with it.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
        struct drm_device *dev = &i915->drm;
        enum pipe pipe;
        struct intel_crtc *crtc;
        int ret;

        /* NOTE(review): the workqueue allocations below are not checked
         * for NULL - confirm the users of these wqs tolerate that. */
        i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
        i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
                                        WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

        intel_mode_config_init(i915);

        ret = intel_bw_init(i915);
        /* NOTE(review): on this failure path the mode config initialized
         * above is not cleaned up here - verify the caller's error path
         * covers it. */
        if (ret)
                return ret;

        init_llist_head(&i915->atomic_helper.free_list);
        INIT_WORK(&i915->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);

        intel_init_quirks(i915);

        intel_fbc_init(i915);

        intel_init_pm(i915);

        intel_panel_sanitize_ssc(i915);

        intel_gmbus_setup(i915);

        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_NUM_PIPES(i915),
                      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

        /* One crtc per pipe, only if the display is present and enabled. */
        if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
                for_each_pipe(i915, pipe) {
                        ret = intel_crtc_init(i915, pipe);
                        if (ret) {
                                drm_mode_config_cleanup(dev);
                                return ret;
                        }
                }
        }

        intel_shared_dpll_init(dev);
        intel_update_fdi_pll_freq(i915);

        intel_update_czclk(i915);
        intel_modeset_init_hw(i915);

        intel_hdcp_component_init(i915);

        if (i915->max_cdclk_freq == 0)
                intel_update_max_cdclk(i915);

        /* Just disable it once at startup */
        intel_vga_disable(i915);
        intel_setup_outputs(i915);

        /* Read out and sanitize the BIOS-programmed hardware state. */
        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                struct intel_initial_plane_config plane_config = {};

                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top.  This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                i915->display.get_initial_plane_config(crtc, &plane_config);

                /*
                 * If the fb is shared between multiple heads, we'll
                 * just get the first one.
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        if (!HAS_GMCH(i915))
                sanitize_watermarks(dev);

        /*
         * Force all active planes to recompute their states. So that on
         * mode_setcrtc after probe, all the intel_plane_state variables
         * are already calculated and there is no assert_plane warnings
         * during bootup.
         */
        ret = intel_initial_commit(dev);
        /* A failed initial commit is deliberately non-fatal. */
        if (ret)
                DRM_DEBUG_KMS("Initial commit in probe failed.\n");

        return 0;
}
17055
/*
 * Force pipe @pipe on with a fixed 640x480@60 timing, programming the
 * DPLL, pipe timings and PIPECONF registers directly. Used by the
 * "force pipe on" quirk (see the debug message below).
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        /* 640x480@60Hz, ~25175 kHz */
        struct dpll clock = {
                .m1 = 18,
                .m2 = 7,
                .p1 = 13,
                .p2 = 4,
                .n = 2,
        };
        u32 dpll, fp;
        int i;

        /* Sanity-check that the dividers above yield the expected dotclock. */
        WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

        DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
                      pipe_name(pipe), clock.vco, clock.dot);

        fp = i9xx_dpll_compute_fp(&clock);
        dpll = DPLL_DVO_2X_MODE |
                DPLL_VGA_MODE_DIS |
                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
                PLL_P2_DIVIDE_BY_4 |
                PLL_REF_INPUT_DREFCLK |
                DPLL_VCO_ENABLE;

        I915_WRITE(FP0(pipe), fp);
        I915_WRITE(FP1(pipe), fp);

        /* Fixed 640x480 timings: 800x525 total, hsync 656-752, vsync 490-492. */
        I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
        I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
        I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(DPLL(pipe), dpll);

        /* We do this three times for luck */
        for (i = 0; i < 3 ; i++) {
                I915_WRITE(DPLL(pipe), dpll);
                POSTING_READ(DPLL(pipe));
                udelay(150); /* wait for warmup */
        }

        I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
        POSTING_READ(PIPECONF(pipe));

        /* Confirm the pipe is actually scanning out before returning. */
        intel_wait_for_pipe_scanline_moving(crtc);
}
17125
/*
 * Counterpart to i830_enable_pipe(): turn the force-quirk pipe back off
 * and put its DPLL into VGA mode.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
                      pipe_name(pipe));

        /* All planes and cursors are expected to be off already. */
        WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
        WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));

        /* Wait for the pipe to actually stop scanning out... */
        intel_wait_for_pipe_scanline_stopped(crtc);

        /* ...before shutting down its DPLL. */
        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
17147
17148 static void
17149 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17150 {
17151         struct intel_crtc *crtc;
17152
17153         if (INTEL_GEN(dev_priv) >= 4)
17154                 return;
17155
17156         for_each_intel_crtc(&dev_priv->drm, crtc) {
17157                 struct intel_plane *plane =
17158                         to_intel_plane(crtc->base.primary);
17159                 struct intel_crtc *plane_crtc;
17160                 enum pipe pipe;
17161
17162                 if (!plane->get_hw_state(plane, &pipe))
17163                         continue;
17164
17165                 if (pipe == crtc->pipe)
17166                         continue;
17167
17168                 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17169                               plane->base.base.id, plane->base.name);
17170
17171                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17172                 intel_plane_disable_noatomic(plane_crtc, plane);
17173         }
17174 }
17175
17176 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17177 {
17178         struct drm_device *dev = crtc->base.dev;
17179         struct intel_encoder *encoder;
17180
17181         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
17182                 return true;
17183
17184         return false;
17185 }
17186
17187 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17188 {
17189         struct drm_device *dev = encoder->base.dev;
17190         struct intel_connector *connector;
17191
17192         for_each_connector_on_encoder(dev, &encoder->base, connector)
17193                 return connector;
17194
17195         return NULL;
17196 }
17197
17198 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17199                               enum pipe pch_transcoder)
17200 {
17201         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17202                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17203 }
17204
/*
 * Bring one crtc into a known-good state after hardware readout,
 * cleaning up whatever the BIOS left behind: clear debug frame start
 * delays, turn off every plane except the primary, reset the pipe
 * background color, disable the pipe entirely if no encoder drives it,
 * and initialize FIFO underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
                                struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* Clear any frame start delays used for debugging left by the BIOS */
        if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                I915_WRITE(reg,
                           I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
        }

        if (crtc_state->hw.active) {
                struct intel_plane *plane;

                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        const struct intel_plane_state *plane_state =
                                to_intel_plane_state(plane->base.state);

                        /* Only visible non-primary planes need disabling */
                        if (plane_state->uapi.visible &&
                            plane->base.type != DRM_PLANE_TYPE_PRIMARY)
                                intel_plane_disable_noatomic(crtc, plane);
                }

                /*
                 * Disable any background color set by the BIOS, but enable the
                 * gamma and CSC to match how we program our planes.
                 */
                if (INTEL_GEN(dev_priv) >= 9)
                        I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
                                   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
                                   SKL_BOTTOM_COLOR_CSC_ENABLE);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base, ctx);

        if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we dont have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                /*
                 * We track the PCH trancoder underrun reporting state
                 * within the crtc. With crtc for pipe A housing the underrun
                 * reporting state for PCH transcoder A, crtc for pipe B housing
                 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
                 * and marking underrun reporting as disabled for the non-existing
                 * PCH transcoders B and C would prevent enabling the south
                 * error interrupt (see cpt_can_enable_serr_int()).
                 */
                if (has_pch_trancoder(dev_priv, crtc->pipe))
                        crtc->pch_fifo_underrun_disabled = true;
        }
}
17277
17278 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17279 {
17280         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17281
17282         /*
17283          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17284          * the hardware when a high res displays plugged in. DPLL P
17285          * divider is zero, and the pipe timings are bonkers. We'll
17286          * try to disable everything in that case.
17287          *
17288          * FIXME would be nice to be able to sanitize this state
17289          * without several WARNs, but for now let's take the easy
17290          * road.
17291          */
17292         return IS_GEN(dev_priv, 6) &&
17293                 crtc_state->hw.active &&
17294                 crtc_state->shared_dpll &&
17295                 crtc_state->port_clock == 0;
17296 }
17297
/*
 * Sanitize one encoder after hardware readout: if the encoder has
 * active connectors but no active pipe (e.g. fallout from resume
 * register restoring, or a BIOS-bogus DPLL config), manually run its
 * disable hooks and clamp the connector/encoder/crtc links to off.
 * Finally notify opregion of the resulting state and, on gen11+,
 * sanitize the encoder's PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_crtc_state *crtc_state = crtc ?
                to_intel_crtc_state(crtc->base.state) : NULL;

        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
        bool has_active_crtc = crtc_state &&
                crtc_state->hw.active;

        /* Treat a BIOS-misprogrammed DPLL as if the pipe were inactive,
         * so the encoder gets disabled below. */
        if (crtc_state && has_bogus_dpll_config(crtc_state)) {
                DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
                              pipe_name(crtc->pipe));
                has_active_crtc = false;
        }

        connector = intel_encoder_find_connector(encoder);
        if (connector && !has_active_crtc) {
                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                              encoder->base.base.id,
                              encoder->base.name);

                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (crtc_state) {
                        struct drm_encoder *best_encoder;

                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);

                        /* avoid oopsing in case the hooks consult best_encoder */
                        best_encoder = connector->base.state->best_encoder;
                        connector->base.state->best_encoder = &encoder->base;

                        /* Both hooks are optional; call whichever exist,
                         * in disable -> post_disable order. */
                        if (encoder->disable)
                                encoder->disable(encoder, crtc_state,
                                                 connector->base.state);
                        if (encoder->post_disable)
                                encoder->post_disable(encoder, crtc_state,
                                                      connector->base.state);

                        /* Restore the original best_encoder pointer */
                        connector->base.state->best_encoder = best_encoder;
                }
                encoder->base.crtc = NULL;

                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */

                connector->base.dpms = DRM_MODE_DPMS_OFF;
                connector->base.encoder = NULL;
        }

        /* notify opregion of the sanitized encoder state */
        intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_sanitize_encoder_pll_mapping(encoder);
}
17364
/* FIXME read out full plane state for all planes */
/*
 * Read the enable state and pipe assignment of every plane from the
 * hardware, record the visibility in the owning crtc's state, then fix
 * up each crtc's active plane bookkeeping.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
        struct intel_plane *plane;
        struct intel_crtc *crtc;

        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
                struct intel_crtc_state *crtc_state;
                /* Default pipe in case get_hw_state() reports disabled */
                enum pipe pipe = PIPE_A;
                bool visible;

                visible = plane->get_hw_state(plane, &pipe);

                /* Attribute visibility to the crtc the hw says owns the plane */
                crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                crtc_state = to_intel_crtc_state(crtc->base.state);

                intel_set_plane_visible(crtc_state, plane_state, visible);

                DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
                              plane->base.base.id, plane->base.name,
                              enableddisabled(visible), pipe_name(pipe));
        }

        /* Second pass: reconcile each crtc's active-planes mask with the
         * per-plane visibility recorded above. */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                fixup_active_planes(crtc_state);
        }
}
17397
/*
 * Read the full display hardware state into the software state
 * structures: crtcs (pipe config), planes, shared DPLLs, encoders and
 * connectors — in that order, since each stage relies on the previous
 * one. Afterwards derive per-crtc software state (modes, pixel rate,
 * min cdclk, voltage level, bandwidth) from what was read out.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;
        int i;

        dev_priv->active_pipes = 0;

        /* Stage 1: reset each crtc state and read the pipe config */
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* Throw away any stale state before re-reading from hw */
                __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
                intel_crtc_free_hw_state(crtc_state);
                memset(crtc_state, 0, sizeof(*crtc_state));
                __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->uapi);

                crtc_state->hw.active = crtc_state->hw.enable =
                        dev_priv->display.get_pipe_config(crtc, crtc_state);

                crtc->base.enabled = crtc_state->hw.enable;
                crtc->active = crtc_state->hw.active;

                if (crtc_state->hw.active)
                        dev_priv->active_pipes |= BIT(crtc->pipe);

                DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
                              crtc->base.base.id, crtc->base.name,
                              enableddisabled(crtc_state->hw.active));
        }

        /* Stage 2: planes (needs crtc states from stage 1) */
        readout_plane_state(dev_priv);

        /* Stage 3: shared DPLLs and which crtcs use them */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
                                                        &pll->state.hw_state);

                /* EHL DPLL4 requires a power domain reference while on */
                if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
                    pll->info->id == DPLL_ID_EHL_DPLL4) {
                        pll->wakeref = intel_display_power_get(dev_priv,
                                                               POWER_DOMAIN_DPLL_DC_OFF);
                }

                /* Rebuild the crtc mask from the active crtcs using this pll */
                pll->state.crtc_mask = 0;
                for_each_intel_crtc(dev, crtc) {
                        struct intel_crtc_state *crtc_state =
                                to_intel_crtc_state(crtc->base.state);

                        if (crtc_state->hw.active &&
                            crtc_state->shared_dpll == pll)
                                pll->state.crtc_mask |= 1 << crtc->pipe;
                }
                pll->active_mask = pll->state.crtc_mask;

                DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
                              pll->info->name, pll->state.crtc_mask, pll->on);
        }

        /* Stage 4: encoders — link each to its crtc and read its config */
        for_each_intel_encoder(dev, encoder) {
                pipe = 0;

                if (encoder->get_hw_state(encoder, &pipe)) {
                        struct intel_crtc_state *crtc_state;

                        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                        crtc_state = to_intel_crtc_state(crtc->base.state);

                        encoder->base.crtc = &crtc->base;
                        encoder->get_config(encoder, crtc_state);
                } else {
                        encoder->base.crtc = NULL;
                }

                DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                              encoder->base.base.id, encoder->base.name,
                              enableddisabled(encoder->base.crtc),
                              pipe_name(pipe));
        }

        /* Stage 5: connectors — link to encoders, update crtc masks */
        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                if (connector->get_hw_state(connector)) {
                        struct intel_crtc_state *crtc_state;
                        struct intel_crtc *crtc;

                        connector->base.dpms = DRM_MODE_DPMS_ON;

                        encoder = connector->encoder;
                        connector->base.encoder = &encoder->base;

                        crtc = to_intel_crtc(encoder->base.crtc);
                        crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

                        if (crtc_state && crtc_state->hw.active) {
                                /*
                                 * This has to be done during hardware readout
                                 * because anything calling .crtc_disable may
                                 * rely on the connector_mask being accurate.
                                 */
                                crtc_state->uapi.connector_mask |=
                                        drm_connector_mask(&connector->base);
                                crtc_state->uapi.encoder_mask |=
                                        drm_encoder_mask(&encoder->base);
                        }
                } else {
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
                              connector->base.base.id, connector->base.name,
                              enableddisabled(connector->base.encoder));
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Stage 6: derive per-crtc software state from the readout */
        for_each_intel_crtc(dev, crtc) {
                struct intel_bw_state *bw_state =
                        to_intel_bw_state(dev_priv->bw_obj.state);
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct intel_plane *plane;
                int min_cdclk = 0;

                if (crtc_state->hw.active) {
                        struct drm_display_mode *mode = &crtc_state->hw.mode;

                        intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
                                                    crtc_state);

                        *mode = crtc_state->hw.adjusted_mode;
                        mode->hdisplay = crtc_state->pipe_src_w;
                        mode->vdisplay = crtc_state->pipe_src_h;

                        /*
                         * The initial mode needs to be set in order to keep
                         * the atomic core happy. It wants a valid mode if the
                         * crtc's enabled, so we do the above call.
                         *
                         * But we don't set all the derived state fully, hence
                         * set a flag to indicate that a full recalculation is
                         * needed on the next commit.
                         */
                        mode->private_flags = I915_MODE_FLAG_INHERITED;

                        intel_crtc_compute_pixel_rate(crtc_state);

                        intel_crtc_update_active_timings(crtc_state);

                        intel_crtc_copy_hw_to_uapi_state(crtc_state);
                }

                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                        const struct intel_plane_state *plane_state =
                                to_intel_plane_state(plane->base.state);

                        /*
                         * FIXME don't have the fb yet, so can't
                         * use intel_plane_data_rate() :(
                         */
                        if (plane_state->uapi.visible)
                                crtc_state->data_rate[plane->id] =
                                        4 * crtc_state->pixel_rate;
                        /*
                         * FIXME don't have the fb yet, so can't
                         * use plane->min_cdclk() :(
                         */
                        if (plane_state->uapi.visible && plane->min_cdclk) {
                                if (crtc_state->double_wide ||
                                    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                                        crtc_state->min_cdclk[plane->id] =
                                                DIV_ROUND_UP(crtc_state->pixel_rate, 2);
                                else
                                        crtc_state->min_cdclk[plane->id] =
                                                crtc_state->pixel_rate;
                        }
                        DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
                                      plane->base.base.id, plane->base.name,
                                      crtc_state->min_cdclk[plane->id]);
                }

                if (crtc_state->hw.active) {
                        min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
                        if (WARN_ON(min_cdclk < 0))
                                min_cdclk = 0;
                }

                dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
                dev_priv->min_voltage_level[crtc->pipe] =
                        crtc_state->min_voltage_level;

                intel_bw_crtc_update(bw_state, crtc_state);

                intel_pipe_config_sanity_check(dev_priv, crtc_state);
        }
}
17598
17599 static void
17600 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17601 {
17602         struct intel_encoder *encoder;
17603
17604         for_each_intel_encoder(&dev_priv->drm, encoder) {
17605                 struct intel_crtc_state *crtc_state;
17606
17607                 if (!encoder->get_power_domains)
17608                         continue;
17609
17610                 /*
17611                  * MST-primary and inactive encoders don't have a crtc state
17612                  * and neither of these require any power domain references.
17613                  */
17614                 if (!encoder->base.crtc)
17615                         continue;
17616
17617                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17618                 encoder->get_power_domains(encoder, crtc_state);
17619         }
17620 }
17621
/*
 * Apply display workarounds that must be in place before any other
 * display programming happens.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
        /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
        if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
                I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
                           DARBF_GATING_DIS);

        if (IS_HASWELL(dev_priv)) {
                /*
                 * WaRsPkgCStateDisplayPMReq:hsw
                 * System hang if this isn't done before disabling all planes!
                 */
                I915_WRITE(CHICKEN_PAR1_1,
                           I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
        }
}
17638
17639 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17640                                        enum port port, i915_reg_t hdmi_reg)
17641 {
17642         u32 val = I915_READ(hdmi_reg);
17643
17644         if (val & SDVO_ENABLE ||
17645             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17646                 return;
17647
17648         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
17649                       port_name(port));
17650
17651         val &= ~SDVO_PIPE_SEL_MASK;
17652         val |= SDVO_PIPE_SEL(PIPE_A);
17653
17654         I915_WRITE(hdmi_reg, val);
17655 }
17656
17657 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17658                                      enum port port, i915_reg_t dp_reg)
17659 {
17660         u32 val = I915_READ(dp_reg);
17661
17662         if (val & DP_PORT_EN ||
17663             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17664                 return;
17665
17666         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
17667                       port_name(port));
17668
17669         val &= ~DP_PIPE_SEL_MASK;
17670         val |= DP_PIPE_SEL(PIPE_A);
17671
17672         I915_WRITE(dp_reg, val);
17673 }
17674
/*
 * Sanitize the transcoder select bits on all IBX PCH DP and HDMI
 * ports.
 */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
        /*
         * The BIOS may select transcoder B on some of the PCH
         * ports even it doesn't enable the port. This would trip
         * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
         * Sanitize the transcoder select bits to prevent that. We
         * assume that the BIOS never actually enabled the port,
         * because if it did we'd actually have to toggle the port
         * on and back off to make the transcoder A select stick
         * (see. intel_dp_link_down(), intel_disable_hdmi(),
         * intel_disable_sdvo()).
         */
        ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
        ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
        ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

        /* PCH SDVOB multiplex with HDMIB */
        ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
        ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
        ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
17697
/*
 * Scan out the current hw modeset state, and sanitize it to the
 * current state. Ordering matters: TypeC ports are sanitized before
 * encoders depend on them, vblank interrupts are restored before the
 * plane-mapping sanitization that may need vblank waits, and watermark
 * state is read out only after the crtcs have been sanitized.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
                             struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state;
        struct intel_encoder *encoder;
        struct intel_crtc *crtc;
        intel_wakeref_t wakeref;
        int i;

        /* Hold display power for the whole readout + sanitize sequence */
        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

        intel_early_display_was(dev_priv);
        intel_modeset_readout_hw_state(dev);

        /* HW state is read out, now we need to sanitize this mess. */

        /* Sanitize the TypeC port mode upfront, encoders depend on this */
        for_each_intel_encoder(dev, encoder) {
                enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

                /* We need to sanitize only the MST primary port. */
                if (encoder->type != INTEL_OUTPUT_DP_MST &&
                    intel_phy_is_tc(dev_priv, phy))
                        intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
        }

        get_encoder_power_domains(dev_priv);

        if (HAS_PCH_IBX(dev_priv))
                ibx_sanitize_pch_ports(dev_priv);

        /*
         * intel_sanitize_plane_mapping() may need to do vblank
         * waits, so we need vblank interrupts restored beforehand.
         */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                crtc_state = to_intel_crtc_state(crtc->base.state);

                drm_crtc_vblank_reset(&crtc->base);

                if (crtc_state->hw.active)
                        intel_crtc_vblank_on(crtc_state);
        }

        intel_sanitize_plane_mapping(dev_priv);

        for_each_intel_encoder(dev, encoder)
                intel_sanitize_encoder(encoder);

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                crtc_state = to_intel_crtc_state(crtc->base.state);
                intel_sanitize_crtc(crtc, ctx);
                intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
        }

        intel_modeset_update_connector_atomic_state(dev);

        /* Turn off shared DPLLs that are on but unused after sanitization */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                if (!pll->on || pll->active_mask)
                        continue;

                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
                              pll->info->name);

                pll->info->funcs->disable(dev_priv, pll);
                pll->on = false;
        }

        /* Read out (and on pre-skl, sanitize) watermark state per platform */
        if (IS_G4X(dev_priv)) {
                g4x_wm_get_hw_state(dev_priv);
                g4x_wm_sanitize(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                vlv_wm_get_hw_state(dev_priv);
                vlv_wm_sanitize(dev_priv);
        } else if (INTEL_GEN(dev_priv) >= 9) {
                skl_wm_get_hw_state(dev_priv);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                ilk_wm_get_hw_state(dev_priv);
        }

        /* Release any power domain references the crtcs no longer need */
        for_each_intel_crtc(dev, crtc) {
                u64 put_domains;

                crtc_state = to_intel_crtc_state(crtc->base.state);
                put_domains = modeset_get_crtc_power_domains(crtc_state);
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

        intel_fbc_init_pipe_state(dev_priv);
}
17798
/*
 * Restore the display state saved across suspend: take all modeset
 * locks (retrying on deadlock), replay the saved atomic state, then
 * re-enable IPC and drop the locks. Logs an error if the restore
 * fails; always releases the saved state.
 */
void intel_display_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = dev_priv->modeset_restore_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        /* Consume the saved state so it can't be restored twice */
        dev_priv->modeset_restore_state = NULL;
        if (state)
                state->acquire_ctx = &ctx;

        drm_modeset_acquire_init(&ctx, 0);

        /* Standard ww-mutex dance: back off and retry on deadlock */
        while (1) {
                ret = drm_modeset_lock_all_ctx(dev, &ctx);
                if (ret != -EDEADLK)
                        break;

                drm_modeset_backoff(&ctx);
        }

        if (!ret)
                ret = __intel_display_resume(dev, state, &ctx);

        intel_enable_ipc(dev_priv);
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
        if (state)
                drm_atomic_state_put(state);
}
17832
/*
 * Flush and cancel all per-connector work that hotplug handling may
 * have queued (modeset retry, HDCP check/property work), as part of
 * driver teardown.
 */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* Kill all the work that may have been queued by hpd. */
        drm_connector_list_iter_begin(&i915->drm, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                /* modeset_retry_work is only initialized for some connectors */
                if (connector->modeset_retry_work.func)
                        cancel_work_sync(&connector->modeset_retry_work);
                /* HDCP work exists only when the connector has an hdcp shim */
                if (connector->hdcp.shim) {
                        cancel_delayed_work_sync(&connector->hdcp.check_work);
                        cancel_work_sync(&connector->hdcp.prop_work);
                }
        }
        drm_connector_list_iter_end(&conn_iter);
}
17850
/*
 * Tear down the modeset side of the driver. The sequence is
 * order-sensitive: flush pending work first, then interrupts, then
 * polling, then fbdev, and only afterwards the mode config and
 * workqueues themselves.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
        /* Drain any commits still in flight before tearing things down */
        flush_workqueue(i915->flip_wq);
        flush_workqueue(i915->modeset_wq);

        flush_work(&i915->atomic_helper.free_work);
        WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
         * Too much stuff here (turning of connectors, ...) would
         * experience fancy races otherwise.
         */
        intel_irq_uninstall(i915);

        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
         * poll handlers. Hence disable polling after hpd handling is shut down.
         */
        intel_hpd_poll_fini(i915);

        /* poll work can call into fbdev, hence clean that up afterwards */
        intel_fbdev_fini(i915);

        intel_unregister_dsm_handler();

        intel_fbc_global_disable(i915);

        /* flush any delayed tasks or pending work */
        flush_scheduled_work();

        intel_hdcp_component_fini(i915);

        drm_mode_config_cleanup(&i915->drm);

        intel_overlay_cleanup(i915);

        intel_gmbus_teardown(i915);

        /* Safe to destroy now that everything queued on them is flushed */
        destroy_workqueue(i915->flip_wq);
        destroy_workqueue(i915->modeset_wq);

        intel_fbc_cleanup_cfb(i915);
}
17895
17896 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
17897
/*
 * Snapshot of display register state captured at error-capture time,
 * for inclusion in GPU error dumps.
 */
struct intel_display_error_state {

        /* HSW_PWR_WELL_CTL2 on HSW/BDW, 0 otherwise */
        u32 power_well_driver;

        struct intel_cursor_error_state {
                u32 control;    /* CURCNTR */
                u32 position;   /* CURPOS */
                u32 base;       /* CURBASE */
                u32 size;
        } cursor[I915_MAX_PIPES];

        struct intel_pipe_error_state {
                /* false if the pipe's power domain was off at capture */
                bool power_domain_on;
                u32 source;
                u32 stat;
        } pipe[I915_MAX_PIPES];

        struct intel_plane_error_state {
                u32 control;    /* DSPCNTR */
                u32 stride;     /* DSPSTRIDE */
                u32 size;       /* DSPSIZE (gen <= 3) */
                u32 pos;        /* DSPPOS (gen <= 3) */
                u32 addr;       /* DSPADDR */
                u32 surface;
                u32 tile_offset;
        } plane[I915_MAX_PIPES];

        struct intel_transcoder_error_state {
                bool available;
                bool power_domain_on;
                enum transcoder cpu_transcoder;

                u32 conf;

                /* Transcoder timing registers */
                u32 htotal;
                u32 hblank;
                u32 hsync;
                u32 vtotal;
                u32 vblank;
                u32 vsync;
        } transcoder[5];
};
17940
17941 struct intel_display_error_state *
17942 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
17943 {
17944         struct intel_display_error_state *error;
17945         int transcoders[] = {
17946                 TRANSCODER_A,
17947                 TRANSCODER_B,
17948                 TRANSCODER_C,
17949                 TRANSCODER_D,
17950                 TRANSCODER_EDP,
17951         };
17952         int i;
17953
17954         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
17955
17956         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
17957                 return NULL;
17958
17959         error = kzalloc(sizeof(*error), GFP_ATOMIC);
17960         if (error == NULL)
17961                 return NULL;
17962
17963         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17964                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
17965
17966         for_each_pipe(dev_priv, i) {
17967                 error->pipe[i].power_domain_on =
17968                         __intel_display_power_is_enabled(dev_priv,
17969                                                          POWER_DOMAIN_PIPE(i));
17970                 if (!error->pipe[i].power_domain_on)
17971                         continue;
17972
17973                 error->cursor[i].control = I915_READ(CURCNTR(i));
17974                 error->cursor[i].position = I915_READ(CURPOS(i));
17975                 error->cursor[i].base = I915_READ(CURBASE(i));
17976
17977                 error->plane[i].control = I915_READ(DSPCNTR(i));
17978                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
17979                 if (INTEL_GEN(dev_priv) <= 3) {
17980                         error->plane[i].size = I915_READ(DSPSIZE(i));
17981                         error->plane[i].pos = I915_READ(DSPPOS(i));
17982                 }
17983                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
17984                         error->plane[i].addr = I915_READ(DSPADDR(i));
17985                 if (INTEL_GEN(dev_priv) >= 4) {
17986                         error->plane[i].surface = I915_READ(DSPSURF(i));
17987                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
17988                 }
17989
17990                 error->pipe[i].source = I915_READ(PIPESRC(i));
17991
17992                 if (HAS_GMCH(dev_priv))
17993                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
17994         }
17995
17996         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17997                 enum transcoder cpu_transcoder = transcoders[i];
17998
17999                 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
18000                         continue;
18001
18002                 error->transcoder[i].available = true;
18003                 error->transcoder[i].power_domain_on =
18004                         __intel_display_power_is_enabled(dev_priv,
18005                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
18006                 if (!error->transcoder[i].power_domain_on)
18007                         continue;
18008
18009                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
18010
18011                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
18012                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
18013                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
18014                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
18015                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
18016                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
18017                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
18018         }
18019
18020         return error;
18021 }
18022
18023 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18024
18025 void
18026 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
18027                                 struct intel_display_error_state *error)
18028 {
18029         struct drm_i915_private *dev_priv = m->i915;
18030         int i;
18031
18032         if (!error)
18033                 return;
18034
18035         err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
18036         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18037                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
18038                            error->power_well_driver);
18039         for_each_pipe(dev_priv, i) {
18040                 err_printf(m, "Pipe [%d]:\n", i);
18041                 err_printf(m, "  Power: %s\n",
18042                            onoff(error->pipe[i].power_domain_on));
18043                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
18044                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
18045
18046                 err_printf(m, "Plane [%d]:\n", i);
18047                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
18048                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
18049                 if (INTEL_GEN(dev_priv) <= 3) {
18050                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
18051                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
18052                 }
18053                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18054                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
18055                 if (INTEL_GEN(dev_priv) >= 4) {
18056                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
18057                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
18058                 }
18059
18060                 err_printf(m, "Cursor [%d]:\n", i);
18061                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
18062                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
18063                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
18064         }
18065
18066         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18067                 if (!error->transcoder[i].available)
18068                         continue;
18069
18070                 err_printf(m, "CPU transcoder: %s\n",
18071                            transcoder_name(error->transcoder[i].cpu_transcoder));
18072                 err_printf(m, "  Power: %s\n",
18073                            onoff(error->transcoder[i].power_domain_on));
18074                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
18075                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
18076                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
18077                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
18078                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
18079                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
18080                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
18081         }
18082 }
18083
18084 #endif