]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/i915/display/intel_display.c
drm/i915: Populate possible_crtcs correctly
[linux.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "i915_drv.h"
59 #include "i915_trace.h"
60 #include "intel_acpi.h"
61 #include "intel_atomic.h"
62 #include "intel_atomic_plane.h"
63 #include "intel_bw.h"
64 #include "intel_cdclk.h"
65 #include "intel_color.h"
66 #include "intel_display_types.h"
67 #include "intel_fbc.h"
68 #include "intel_fbdev.h"
69 #include "intel_fifo_underrun.h"
70 #include "intel_frontbuffer.h"
71 #include "intel_hdcp.h"
72 #include "intel_hotplug.h"
73 #include "intel_overlay.h"
74 #include "intel_pipe_crc.h"
75 #include "intel_pm.h"
76 #include "intel_psr.h"
77 #include "intel_quirks.h"
78 #include "intel_sideband.h"
79 #include "intel_sprite.h"
80 #include "intel_tc.h"
81 #include "intel_vga.h"
82
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Framebuffer modifiers for the i9xx-style primary planes; the list is
 * terminated by the DRM_FORMAT_MOD_INVALID sentinel. */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only support linear buffers; sentinel-terminated as above. */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
116
117 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
118                                 struct intel_crtc_state *pipe_config);
119 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
120                                    struct intel_crtc_state *pipe_config);
121
122 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
123                                   struct drm_i915_gem_object *obj,
124                                   struct drm_mode_fb_cmd2 *mode_cmd);
125 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
126 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
127 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
128                                          const struct intel_link_m_n *m_n,
129                                          const struct intel_link_m_n *m2_n2);
130 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
131 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
132 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
133 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
134 static void vlv_prepare_pll(struct intel_crtc *crtc,
135                             const struct intel_crtc_state *pipe_config);
136 static void chv_prepare_pll(struct intel_crtc *crtc,
137                             const struct intel_crtc_state *pipe_config);
138 static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
139 static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
140 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
141                                     struct intel_crtc_state *crtc_state);
142 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
143 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
144 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
145 static void intel_modeset_setup_hw_state(struct drm_device *dev,
146                                          struct drm_modeset_acquire_ctx *ctx);
147 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
148
/*
 * Per-platform DPLL divider limits.  Each clock/divider has an inclusive
 * [min, max] range; .p2 additionally carries a dot-clock threshold
 * (.dot_limit) that selects between the slow and fast post divider
 * (see i9xx_select_p2_div()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
159
160 /* returns HPLL frequency in kHz */
161 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
162 {
163         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
164
165         /* Obtain SKU information */
166         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
167                 CCK_FUSE_HPLL_FREQ_MASK;
168
169         return vco_freq[hpll_freq] * 1000;
170 }
171
172 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
173                       const char *name, u32 reg, int ref_freq)
174 {
175         u32 val;
176         int divider;
177
178         val = vlv_cck_read(dev_priv, reg);
179         divider = val & CCK_FREQUENCY_VALUES;
180
181         WARN((val & CCK_FREQUENCY_STATUS) !=
182              (divider << CCK_FREQUENCY_STATUS_SHIFT),
183              "%s change in progress\n", name);
184
185         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
186 }
187
188 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
189                            const char *name, u32 reg)
190 {
191         int hpll;
192
193         vlv_cck_get(dev_priv);
194
195         if (dev_priv->hpll_freq == 0)
196                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
197
198         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
199
200         vlv_cck_put(dev_priv);
201
202         return hpll;
203 }
204
205 static void intel_update_czclk(struct drm_i915_private *dev_priv)
206 {
207         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
208                 return;
209
210         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
211                                                       CCK_CZ_CLOCK_CONTROL);
212
213         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
214 }
215
216 static inline u32 /* units of 100MHz */
217 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
218                     const struct intel_crtc_state *pipe_config)
219 {
220         if (HAS_DDI(dev_priv))
221                 return pipe_config->port_clock; /* SPLL */
222         else
223                 return dev_priv->fdi_pll_freq;
224 }
225
/* gen2 DPLL limits, DAC output variant */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 DPLL limits, DVO output variant (only p2_fast differs from DAC) */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 DPLL limits, LVDS output variant */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* gen3/4 (i9xx) DPLL limits, SDVO and other non-LVDS outputs */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* gen3/4 (i9xx) DPLL limits, LVDS outputs */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
290
291
/* g4x DPLL limits, SDVO outputs */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* g4x DPLL limits, HDMI outputs */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* g4x DPLL limits, single-channel LVDS (slow == fast, so p2 is fixed at 14) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* g4x DPLL limits, dual-channel LVDS (slow == fast, so p2 is fixed at 7) */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
347
/* Pineview DPLL limits, SDVO outputs */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL limits, LVDS outputs (m1 fixed at 0 as above) */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
375
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Ironlake, single-channel LVDS, 120MHz refclk */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake, dual-channel LVDS, 120MHz refclk */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake, dual-channel LVDS, 100MHz refclk */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
446
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits (cf. n << 22 in chv_calc_dpll_params) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
490
491 /* WA Display #0827: Gen9:all */
492 static void
493 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
494 {
495         if (enable)
496                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
497                            I915_READ(CLKGATE_DIS_PSL(pipe)) |
498                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
499         else
500                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
501                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
502                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
503 }
504
505 /* Wa_2006604312:icl */
506 static void
507 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
508                        bool enable)
509 {
510         if (enable)
511                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
512                            I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
513         else
514                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
515                            I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
516 }
517
/* Whether this crtc state requires a full modeset; thin wrapper around
 * the drm atomic helper check on the embedded base state. */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->base);
}
523
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	/* Pineview has a single combined m divider, stored in m2. */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* Guard against divide-by-zero from bogus divider values. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
544
/* Effective loopback divider: the hw adds 2 to both the m1 and m2
 * register values (cf. the comment above intel_limits_ironlake_dac). */
static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
549
/* Fill in m/p/vco/dot from the divider fields; returns the fast dot clock. */
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* The hw uses n + 2 as the actual N divider. */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
561
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	/* VLV limits are in fast-clock (5x data rate) units; return the
	 * divided-down pipe rate (cf. intel_limits_vlv). */
	return clock->dot / 5;
}
573
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	/* CHV m2 carries 22 fractional bits (cf. intel_limits_chv), hence
	 * the 64-bit multiply and the n << 22 divisor. */
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	/* Fast-clock (5x) units, as on VLV. */
	return clock->dot / 5;
}
586
/* NOTE: expands to "return false" from the *enclosing function*. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* Pineview has a single combined m divider (m1 fixed at 0), and
	 * VLV/CHV/BXT don't use the m1 > m2 constraint either. */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* The VLV/CHV/BXT limit tables don't specify overall m/p ranges. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
629
630 static int
631 i9xx_select_p2_div(const struct intel_limit *limit,
632                    const struct intel_crtc_state *crtc_state,
633                    int target)
634 {
635         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
636
637         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
638                 /*
639                  * For LVDS just rely on its current settings for dual-channel.
640                  * We haven't figured out how to reliably set up different
641                  * single/dual channel state, if we even can.
642                  */
643                 if (intel_is_dual_link_lvds(dev_priv))
644                         return limit->p2.p2_fast;
645                 else
646                         return limit->p2.p2_slow;
647         } else {
648                 if (target < limit->p2.dot_limit)
649                         return limit->p2.p2_slow;
650                 else
651                         return limit->p2.p2_fast;
652         }
653 }
654
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* Best error so far; seeded with the target so any valid hit wins. */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustively scan the divider space for the closest dot clock. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* i9xx requires m1 > m2 (cf. intel_PLL_is_valid()) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only drops below target when some candidate was accepted. */
	return (err != target);
}
712
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* Best error so far; seeded with the target so any valid hit wins. */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Same exhaustive scan as i9xx_find_best_dpll(), but without the
	 * m1 > m2 constraint (Pineview's m1 is fixed at 0) and using the
	 * Pineview clock equation. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only drops below target when some candidate was accepted. */
	return (err != target);
}
768
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);

        /* NOTE(review): @match_clock is accepted but not consulted here,
         * unlike the other find_best_dpll variants — confirm intentional. */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                /* once a candidate is found, never
                                                 * consider a larger n than its n */
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}
827
828 /*
829  * Check if the calculated PLL configuration is more optimal compared to the
830  * best configuration and error found so far. Return the calculated error.
831  */
832 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
833                                const struct dpll *calculated_clock,
834                                const struct dpll *best_clock,
835                                unsigned int best_error_ppm,
836                                unsigned int *error_ppm)
837 {
838         /*
839          * For CHV ignore the error and consider only the P value.
840          * Prefer a bigger P value based on HW requirements.
841          */
842         if (IS_CHERRYVIEW(to_i915(dev))) {
843                 *error_ppm = 0;
844
845                 return calculated_clock->p > best_clock->p;
846         }
847
848         if (WARN_ON_ONCE(!target_freq))
849                 return false;
850
851         *error_ppm = div_u64(1000000ULL *
852                                 abs(target_freq - calculated_clock->dot),
853                              target_freq);
854         /*
855          * Prefer a better P value over a better (smaller) error if the error
856          * is small. Ensure this preference for future configurations too by
857          * setting the error to 0.
858          */
859         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
860                 *error_ppm = 0;
861
862                 return true;
863         }
864
865         return *error_ppm + 10 < best_error_ppm;
866 }
867
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
        bool found = false;

        /* NOTE(review): @match_clock is accepted but not consulted here —
         * candidate selection is done purely via vlv_PLL_is_optimal(). */

        target *= 5; /* fast clock */

        memset(best_clock, 0, sizeof(*best_clock));

        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                        for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
                             clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                                clock.p = clock.p1 * clock.p2;
                                /* based on hardware requirement, prefer bigger m1,m2 values */
                                for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
                                        unsigned int ppm;

                                        /* solve m2 from the clock equation for this n/p/m1 */
                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
                                                                     refclk * clock.m1);

                                        vlv_calc_dpll_params(refclk, &clock);

                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        if (!vlv_PLL_is_optimal(dev, target,
                                                                &clock,
                                                                best_clock,
                                                                bestppm, &ppm))
                                                continue;

                                        *best_clock = clock;
                                        bestppm = ppm;
                                        found = true;
                                }
                        }
                }
        }

        return found;
}
927
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
933 static bool
934 chv_find_best_dpll(const struct intel_limit *limit,
935                    struct intel_crtc_state *crtc_state,
936                    int target, int refclk, struct dpll *match_clock,
937                    struct dpll *best_clock)
938 {
939         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
940         struct drm_device *dev = crtc->base.dev;
941         unsigned int best_error_ppm;
942         struct dpll clock;
943         u64 m2;
944         int found = false;
945
946         memset(best_clock, 0, sizeof(*best_clock));
947         best_error_ppm = 1000000;
948
949         /*
950          * Based on hardware doc, the n always set to 1, and m1 always
951          * set to 2.  If requires to support 200Mhz refclk, we need to
952          * revisit this because n may not 1 anymore.
953          */
954         clock.n = 1, clock.m1 = 2;
955         target *= 5;    /* fast clock */
956
957         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
958                 for (clock.p2 = limit->p2.p2_fast;
959                                 clock.p2 >= limit->p2.p2_slow;
960                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
961                         unsigned int error_ppm;
962
963                         clock.p = clock.p1 * clock.p2;
964
965                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
966                                                    refclk * clock.m1);
967
968                         if (m2 > INT_MAX/clock.m1)
969                                 continue;
970
971                         clock.m2 = m2;
972
973                         chv_calc_dpll_params(refclk, &clock);
974
975                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
976                                 continue;
977
978                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
979                                                 best_error_ppm, &error_ppm))
980                                 continue;
981
982                         *best_clock = clock;
983                         best_error_ppm = error_ppm;
984                         found = true;
985                 }
986         }
987
988         return found;
989 }
990
991 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
992                         struct dpll *best_clock)
993 {
994         int refclk = 100000;
995         const struct intel_limit *limit = &intel_limits_bxt;
996
997         return chv_find_best_dpll(limit, crtc_state,
998                                   crtc_state->port_clock, refclk,
999                                   NULL, best_clock);
1000 }
1001
1002 bool intel_crtc_active(struct intel_crtc *crtc)
1003 {
1004         /* Be paranoid as we can arrive here with only partial
1005          * state retrieved from the hardware during setup.
1006          *
1007          * We can ditch the adjusted_mode.crtc_clock check as soon
1008          * as Haswell has gained clock readout/fastboot support.
1009          *
1010          * We can ditch the crtc->primary->state->fb check as soon as we can
1011          * properly reconstruct framebuffers.
1012          *
1013          * FIXME: The intel_crtc->active here should be switched to
1014          * crtc->state->active once we have proper CRTC states wired up
1015          * for atomic.
1016          */
1017         return crtc->active && crtc->base.primary->state->fb &&
1018                 crtc->config->base.adjusted_mode.crtc_clock;
1019 }
1020
1021 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1022                                              enum pipe pipe)
1023 {
1024         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1025
1026         return crtc->config->cpu_transcoder;
1027 }
1028
1029 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1030                                     enum pipe pipe)
1031 {
1032         i915_reg_t reg = PIPEDSL(pipe);
1033         u32 line1, line2;
1034         u32 line_mask;
1035
1036         if (IS_GEN(dev_priv, 2))
1037                 line_mask = DSL_LINEMASK_GEN2;
1038         else
1039                 line_mask = DSL_LINEMASK_GEN3;
1040
1041         line1 = I915_READ(reg) & line_mask;
1042         msleep(5);
1043         line2 = I915_READ(reg) & line_mask;
1044
1045         return line1 != line2;
1046 }
1047
/*
 * Poll (up to 100 ms) until the pipe's scanline counter is moving
 * (@state == true) or stopped (@state == false); log an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Wait for the display line to settle/start moving */
        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
                DRM_ERROR("pipe %c scanline %s wait timed out\n",
                          pipe_name(pipe), onoff(state));
}
1058
/* Wait until the pipe's scanline counter has stopped advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, false);
}
1063
/* Wait until the pipe's scanline counter has started advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, true);
}
1068
/*
 * Wait for the pipe described by @old_crtc_state to fully shut down.
 * Gen4+ exposes a PIPECONF "pipe active" status bit; older hardware has
 * to be inferred from the scanline counter stopping.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (INTEL_GEN(dev_priv) >= 4) {
                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_de_wait_for_clear(dev_priv, reg,
                                            I965_PIPECONF_ACTIVE, 100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                intel_wait_for_pipe_scanline_stopped(crtc);
        }
}
1087
/* Only for pre-ILK configs */
/* Warn if the DPLL's VCO enable state differs from the expected @state. */
void assert_pll(struct drm_i915_private *dev_priv,
                enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(DPLL(pipe));
        cur_state = !!(val & DPLL_VCO_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1101
/* XXX: the dsi pll is shared between MIPI DSI ports */
/* Warn if the (shared) DSI PLL enable state differs from @state. */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        /* the DSI PLL lives behind the CCK sideband interface */
        vlv_cck_get(dev_priv);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        vlv_cck_put(dev_priv);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1117
/* Warn if the FDI transmitter enable state differs from @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1139
/* Warn if the FDI receiver enable state differs from @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(FDI_RX_CTL(pipe));
        cur_state = !!(val & FDI_RX_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "FDI RX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1154
/* Warn if the FDI TX PLL is not enabled (where it is software controlled). */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (IS_GEN(dev_priv, 5))
                return;

        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
        if (HAS_DDI(dev_priv))
                return;

        val = I915_READ(FDI_TX_CTL(pipe));
        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1171
/* Warn if the FDI RX PLL enable state differs from @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
                       enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(FDI_RX_CTL(pipe));
        cur_state = !!(val & FDI_RX_PLL_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "FDI RX PLL assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1184
/*
 * Warn if the panel power sequencer driving @pipe has its registers
 * locked while the panel is powered on.  First figure out which pipe the
 * panel power sequencer is attached to (platform dependent), then check
 * the lock bits in the corresponding PP_CONTROL register.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;

        /* DDI platforms use a different PPS mechanism; not handled here */
        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* map the PPS port select to the pipe driving that port */
                switch (port_sel) {
                case PANEL_PORT_SELECT_LVDS:
                        intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPA:
                        intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPC:
                        intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPD:
                        intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
                        break;
                default:
                        MISSING_CASE(port_sel);
                        break;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* pre-PCH-split mobile: panel is always on LVDS */
                WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
                intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }

        /* unlocked means either powered off or the unlock pattern is set */
        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
1241
/* Warn if the pipe's enable state differs from the expected @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;

        /* only touch PIPECONF if its power domain is actually powered */
        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wakeref) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain, wakeref);
        } else {
                /* power domain off implies the pipe is off */
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
1270
/* Warn if the plane's hardware enable state differs from @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
        enum pipe pipe;
        bool cur_state;

        cur_state = plane->get_hw_state(plane, &pipe);

        I915_STATE_WARN(cur_state != state,
                        "%s assertion failure (expected %s, current %s)\n",
                        plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1285
/* Warn if any plane attached to @crtc is still enabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_plane *plane;

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
                assert_plane_disabled(plane);
}
1294
/*
 * Warn if vblank interrupts are still enabled on @crtc; drop the
 * reference we just took so the probe itself has no side effect.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1300
/* Warn if the PCH transcoder on @pipe is still enabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        u32 val;
        bool enabled;

        val = I915_READ(PCH_TRANSCONF(pipe));
        enabled = !!(val & TRANS_ENABLE);
        I915_STATE_WARN(enabled,
             "transcoder assertion failed, should be off on pipe %c but is still active\n",
             pipe_name(pipe));
}
1313
/*
 * Warn if the PCH DP port @port is enabled on transcoder @pipe, or (on
 * IBX) is disabled while still selecting transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, enum port port,
                                   i915_reg_t dp_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH DP %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH DP %c still using transcoder B\n",
                        port_name(port));
}
1331
/*
 * Warn if the PCH HDMI port @port is enabled on transcoder @pipe, or (on
 * IBX) is disabled while still selecting transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum port port,
                                     i915_reg_t hdmi_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH HDMI %c still using transcoder B\n",
                        port_name(port));
}
1349
/* Warn if any PCH output port (DP/VGA/LVDS/HDMI) still drives @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        enum pipe port_pipe;

        assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

        I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH VGA enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        /* PCH SDVOB multiplex with HDMIB */
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1374
/* Write the precomputed DPLL value and wait for the PLL to lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        POSTING_READ(DPLL(pipe));
        udelay(150);

        if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1388
/*
 * Enable the DPLL for @crtc on VLV: sanity-check preconditions, lock the
 * VCO if the computed state enables it, then program DPLL_MD.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1406
1407
/*
 * Enable the DPLL for @crtc on CHV: first enable the 10-bit clock via the
 * DPIO sideband, then (after the mandated >100ns delay) enable the PLL
 * and wait for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        vlv_dpio_get(dev_priv);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        vlv_dpio_put(dev_priv);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1437
/*
 * Enable the DPLL for @crtc on CHV and program DPLL_MD, working around
 * the missing per-pipe DPLL_MD register on pipes B/C via chicken bits.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _chv_enable_pll(crtc, pipe_config);

        if (pipe != PIPE_A) {
                /*
                 * WaPixelRepeatModeFixForC0:chv
                 *
                 * DPLLCMD is AWOL. Use chicken bits to propagate
                 * the value from DPLLBMD to either pipe B or C.
                 */
                I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
                I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
                I915_WRITE(CBR4_VLV, 0);
                /* cache the value since it can't be read back via DPLL_MD */
                dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

                /*
                 * DPLLB VGA mode also seems to cause problems.
                 * We should always have it disabled.
                 */
                WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
        } else {
                I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
                POSTING_READ(DPLL_MD(pipe));
        }
}
1474
1475 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1476 {
1477         if (IS_I830(dev_priv))
1478                 return false;
1479
1480         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1481 }
1482
/*
 * Enable the DPLL for @crtc on gen2-4, following the documented write
 * sequence (VGA-mode priming, stabilization delays, triple rewrite).
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc_state->dpll_hw_state.dpll;
        int i;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (i9xx_has_pps(dev_priv))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc_state->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        for (i = 0; i < 3; i++) {
                I915_WRITE(reg, dpll);
                POSTING_READ(reg);
                udelay(150); /* wait for warmup */
        }
}
1528
/* Disable the gen2-4 DPLL, leaving only VGA mode disable set. */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Don't disable pipe or pipe PLLs if needed */
        if (IS_I830(dev_priv))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
1545
1546 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1547 {
1548         u32 val;
1549
1550         /* Make sure the pipe isn't still relying on us */
1551         assert_pipe_disabled(dev_priv, pipe);
1552
1553         val = DPLL_INTEGRATED_REF_CLK_VLV |
1554                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1555         if (pipe != PIPE_A)
1556                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1557
1558         I915_WRITE(DPLL(pipe), val);
1559         POSTING_READ(DPLL(pipe));
1560 }
1561
/*
 * chv_disable_pll - disable the DPLL of a CHV pipe
 * @dev_priv: i915 device
 * @pipe: pipe whose DPLL is being turned off
 *
 * Turns the PLL off (keeping the SSC reference clock running and VGA
 * mode disabled), then clears the 10 bit clock enable in the DPIO PHY.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        /* Pipes other than A additionally keep the CRI clock enabled. */
        val = DPLL_SSC_REF_CLK_CHV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));

        vlv_dpio_get(dev_priv);

        /* Disable 10bit clock to display controller */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

        vlv_dpio_put(dev_priv);
}
1587
/*
 * vlv_wait_port_ready - wait for a VLV/CHV digital port to become ready
 * @dev_priv: i915 device
 * @dport: digital port being waited on
 * @expected_mask: ready bits expected to be set
 *
 * Polls the port-ready status bits for up to 1000 (timeout units of
 * intel_de_wait_for_register) and WARNs with the observed vs expected
 * value on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport,
                         unsigned int expected_mask)
{
        u32 port_mask;
        i915_reg_t dpll_reg;

        switch (dport->base.port) {
        case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
                dpll_reg = DPLL(0);
                break;
        case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
                dpll_reg = DPLL(0);
                /*
                 * Port C shares DPLL(0) with port B; its ready bits
                 * sit 4 bits higher, so shift the expectation to match.
                 */
                expected_mask <<= 4;
                break;
        case PORT_D:
                port_mask = DPLL_PORTD_READY_MASK;
                dpll_reg = DPIO_PHY_STATUS;
                break;
        default:
                BUG();
        }

        if (intel_de_wait_for_register(dev_priv, dpll_reg,
                                       port_mask, expected_mask, 1000))
                WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
                     dport->base.base.base.id, dport->base.base.name,
                     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1619
/*
 * ironlake_enable_pch_transcoder - enable the PCH transcoder for a pipe
 * @crtc_state: state of the CRTC whose PCH transcoder is enabled
 *
 * Requires the shared DPLL and both FDI TX/RX to already be running.
 * Copies the BPC and interlace configuration over from the CPU pipe's
 * PIPECONF, then enables the transcoder and waits for it to report
 * the enabled state.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val, pipeconf_val;

        /* Make sure PCH DPLL is enabled */
        assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
        assert_fdi_rx_enabled(dev_priv, pipe);

        if (HAS_PCH_CPT(dev_priv)) {
                /* Workaround: Set the timing override bit before enabling the
                 * pch transcoder. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        pipeconf_val = I915_READ(PIPECONF(pipe));

        if (HAS_PCH_IBX(dev_priv)) {
                /*
                 * Make the BPC in transcoder be consistent with
                 * that in pipeconf reg. For HDMI we must use 8bpc
                 * here for both 8bpc and 12bpc.
                 */
                val &= ~PIPECONF_BPC_MASK;
                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                        val |= PIPECONF_8BPC;
                else
                        val |= pipeconf_val & PIPECONF_BPC_MASK;
        }

        /* Mirror the pipe's interlace mode; IBX SDVO uses the legacy mode. */
        val &= ~TRANS_INTERLACE_MASK;
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
                if (HAS_PCH_IBX(dev_priv) &&
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        val |= TRANS_LEGACY_INTERLACED_ILK;
                else
                        val |= TRANS_INTERLACED;
        } else {
                val |= TRANS_PROGRESSIVE;
        }

        I915_WRITE(reg, val | TRANS_ENABLE);
        if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
                DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1676
/*
 * lpt_enable_pch_transcoder - enable the (single) LPT PCH transcoder
 * @dev_priv: i915 device
 * @cpu_transcoder: CPU transcoder feeding the PCH
 *
 * LPT has one PCH transcoder, tied to PIPE_A (hence the PIPE_A
 * chicken-register and FDI RX asserts below). Mirrors the CPU
 * transcoder's interlace mode, enables the transcoder and waits for
 * it to report the enabled state.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                      enum transcoder cpu_transcoder)
{
        u32 val, pipeconf_val;

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, PIPE_A);

        /* Workaround: set timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

        val = TRANS_ENABLE;
        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
            PIPECONF_INTERLACED_ILK)
                val |= TRANS_INTERLACED;
        else
                val |= TRANS_PROGRESSIVE;

        I915_WRITE(LPT_TRANSCONF, val);
        if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
                                  TRANS_STATE_ENABLE, 100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
}
1705
/*
 * ironlake_disable_pch_transcoder - disable a pipe's PCH transcoder
 * @dev_priv: i915 device
 * @pipe: pipe whose PCH transcoder is turned off
 *
 * FDI and the PCH ports must already be off. Clears TRANS_ENABLE,
 * waits for the transcoder to report disabled, and on CPT undoes the
 * timing-override workaround applied at enable time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
{
        i915_reg_t reg;
        u32 val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

        if (HAS_PCH_CPT(dev_priv)) {
                /* Workaround: Clear the timing override chicken bit again. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }
}
1735
/*
 * lpt_disable_pch_transcoder - disable the (single) LPT PCH transcoder
 * @dev_priv: i915 device
 *
 * Clears TRANS_ENABLE, waits for the transcoder to report disabled,
 * then undoes the PIPE_A timing-override workaround applied at enable.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(LPT_TRANSCONF);
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
                                    TRANS_STATE_ENABLE, 50))
                DRM_ERROR("Failed to disable PCH transcoder\n");

        /* Workaround: clear timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1753
1754 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1755 {
1756         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1757
1758         if (HAS_PCH_LPT(dev_priv))
1759                 return PIPE_A;
1760         else
1761                 return crtc->pipe;
1762 }
1763
1764 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1765 {
1766         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1767
1768         /*
1769          * On i965gm the hardware frame counter reads
1770          * zero when the TV encoder is enabled :(
1771          */
1772         if (IS_I965GM(dev_priv) &&
1773             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1774                 return 0;
1775
1776         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1777                 return 0xffffffff; /* full 32 bit counter */
1778         else if (INTEL_GEN(dev_priv) >= 3)
1779                 return 0xffffff; /* only 24 bits of frame count */
1780         else
1781                 return 0; /* Gen2 doesn't have a hardware frame counter */
1782 }
1783
1784 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1785 {
1786         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1787
1788         drm_crtc_set_max_vblank_count(&crtc->base,
1789                                       intel_crtc_max_vblank_count(crtc_state));
1790         drm_crtc_vblank_on(&crtc->base);
1791 }
1792
/*
 * intel_enable_pipe - enable the (CPU) pipe for a CRTC
 * @new_crtc_state: state being enabled
 *
 * Asserts the required clock sources are running (pipe PLL or DSI PLL
 * on gmch platforms, FDI PLLs for PCH encoders), sets PIPECONF_ENABLE
 * and, when there is no usable hardware frame counter, waits for the
 * scanline to start moving before vblanks are relied upon.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

        assert_planes_disabled(crtc);

        /*
         * A pipe without a PLL won't actually be able to drive bits from
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
        if (HAS_GMCH(dev_priv)) {
                if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
        } else {
                if (new_crtc_state->has_pch_encoder) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv,
                                                  intel_crtc_pch_transcoder(crtc));
                        assert_fdi_tx_pll_enabled(dev_priv,
                                                  (enum pipe) cpu_transcoder);
                }
                /* FIXME: assert CPU port conditions for SNB+ */
        }

        trace_intel_pipe_enable(crtc);

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if (val & PIPECONF_ENABLE) {
                /* we keep both pipes enabled on 830 */
                WARN_ON(!IS_I830(dev_priv));
                return;
        }

        I915_WRITE(reg, val | PIPECONF_ENABLE);
        POSTING_READ(reg);

        /*
         * Until the pipe starts PIPEDSL reads will return a stale value,
         * which causes an apparent vblank timestamp jump when PIPEDSL
         * resets to its proper value. That also messes up the frame count
         * when it's derived from the timestamps. So let's wait for the
         * pipe to start properly before we call drm_crtc_vblank_on()
         */
        if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
                intel_wait_for_pipe_scanline_moving(crtc);
}
1850
/*
 * intel_disable_pipe - disable the (CPU) pipe for a CRTC
 * @old_crtc_state: state being torn down
 *
 * Clears PIPECONF_ENABLE (except on 830, where both pipes stay on) and
 * waits for the pipe to actually stop. Double-wide mode is always
 * cleared here since it affects plane behaviour.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

        /*
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
        assert_planes_disabled(crtc);

        trace_intel_pipe_disable(crtc);

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if ((val & PIPECONF_ENABLE) == 0)
                return;

        /*
         * Double wide has implications for planes
         * so best keep it disabled when not needed.
         */
        if (old_crtc_state->double_wide)
                val &= ~PIPECONF_DOUBLE_WIDE;

        /* Don't disable pipe or pipe PLLs if needed */
        if (!IS_I830(dev_priv))
                val &= ~PIPECONF_ENABLE;

        I915_WRITE(reg, val);
        if ((val & PIPECONF_ENABLE) == 0)
                intel_wait_for_pipe_off(old_crtc_state);
}
1890
/* GTT tile size in bytes: 2 KiB on gen2, 4 KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
        if (IS_GEN(dev_priv, 2))
                return 2048;

        return 4096;
}
1895
/*
 * Return the width of one tile row in bytes for @color_plane of @fb.
 * For linear framebuffers the full tile size is returned, i.e. a
 * linear "tile" is treated as tile_size x 1 bytes.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                return intel_tile_size(dev_priv);
        case I915_FORMAT_MOD_X_TILED:
                if (IS_GEN(dev_priv, 2))
                        return 128;
                else
                        return 512;
        case I915_FORMAT_MOD_Y_TILED_CCS:
                /* The CCS aux plane (color_plane == 1) always uses 128. */
                if (color_plane == 1)
                        return 128;
                /* fall through */
        case I915_FORMAT_MOD_Y_TILED:
                if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
                        return 128;
                else
                        return 512;
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                if (color_plane == 1)
                        return 128;
                /* fall through */
        case I915_FORMAT_MOD_Yf_TILED:
                /* Yf tile width depends on the pixel size. */
                switch (cpp) {
                case 1:
                        return 64;
                case 2:
                case 4:
                        return 128;
                case 8:
                case 16:
                        return 256;
                default:
                        MISSING_CASE(cpp);
                        return cpp;
                }
                break;
        default:
                MISSING_CASE(fb->modifier);
                return cpp;
        }
}
1943
1944 static unsigned int
1945 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1946 {
1947         return intel_tile_size(to_i915(fb->dev)) /
1948                 intel_tile_width_bytes(fb, color_plane);
1949 }
1950
1951 /* Return the tile dimensions in pixel units */
1952 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1953                             unsigned int *tile_width,
1954                             unsigned int *tile_height)
1955 {
1956         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1957         unsigned int cpp = fb->format->cpp[color_plane];
1958
1959         *tile_width = tile_width_bytes / cpp;
1960         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1961 }
1962
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
                      int color_plane, unsigned int height)
{
        return ALIGN(height, intel_tile_height(fb, color_plane));
}
1971
1972 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1973 {
1974         unsigned int size = 0;
1975         int i;
1976
1977         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1978                 size += rot_info->plane[i].width * rot_info->plane[i].height;
1979
1980         return size;
1981 }
1982
1983 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1984 {
1985         unsigned int size = 0;
1986         int i;
1987
1988         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1989                 size += rem_info->plane[i].width * rem_info->plane[i].height;
1990
1991         return size;
1992 }
1993
1994 static void
1995 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1996                         const struct drm_framebuffer *fb,
1997                         unsigned int rotation)
1998 {
1999         view->type = I915_GGTT_VIEW_NORMAL;
2000         if (drm_rotation_90_or_270(rotation)) {
2001                 view->type = I915_GGTT_VIEW_ROTATED;
2002                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2003         }
2004 }
2005
/* Required cursor surface alignment in bytes for the given platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
        if (IS_I830(dev_priv))
                return 16 * 1024;

        if (IS_I85X(dev_priv))
                return 256;

        if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
                return 32;

        return 4 * 1024;
}
2017
/* Required alignment in bytes for a linear scanout surface. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 9)
                return 256 * 1024;

        if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return 128 * 1024;

        if (INTEL_GEN(dev_priv) >= 4)
                return 4 * 1024;

        return 0;
}
2030
/*
 * Return the required GGTT alignment in bytes for scanning out
 * @color_plane of @fb, based on the fb's tiling modifier.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
                                         int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);

        /* AUX_DIST needs only 4K alignment */
        if (color_plane == 1)
                return 4096;

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                return intel_linear_alignment(dev_priv);
        case I915_FORMAT_MOD_X_TILED:
                if (INTEL_GEN(dev_priv) >= 9)
                        return 256 * 1024;
                return 0;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                return 1 * 1024 * 1024;
        default:
                MISSING_CASE(fb->modifier);
                return 0;
        }
}
2057
2058 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2059 {
2060         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2061         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2062
2063         return INTEL_GEN(dev_priv) < 4 ||
2064                 (plane->has_fbc &&
2065                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2066 }
2067
/**
 * intel_pin_and_fence_fb_obj - pin a framebuffer into the GGTT for scanout
 * @fb: framebuffer to pin
 * @view: GGTT view (normal or rotated) to map the object with
 * @uses_fence: whether scanout wants a fence when one can be installed
 * @out_flags: PLANE_HAS_FENCE is OR'ed in when a fence was installed
 *
 * Caller must hold struct_mutex. Returns the pinned vma with an extra
 * reference held for the caller, or an ERR_PTR on failure.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                           const struct i915_ggtt_view *view,
                           bool uses_fence,
                           unsigned long *out_flags)
{
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        unsigned int pinctl;
        u32 alignment;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
                return ERR_PTR(-EINVAL);

        alignment = intel_surf_alignment(fb, 0);

        /* Note that the w/a also requires 64 PTE of padding following the
         * bo. We currently fill all unused PTE with the shadow page and so
         * we should always have valid PTE following the scanout preventing
         * the VT-d warning.
         */
        if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
                alignment = 256 * 1024;

        /*
         * Global gtt pte registers are special registers which actually forward
         * writes to a chunk of system memory. Which means that there is no risk
         * that the register values disappear as soon as we call
         * intel_runtime_pm_put(), so it is correct to wrap only the
         * pin/unpin/fence and not more.
         */
        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
        i915_gem_object_lock(obj);

        atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

        pinctl = 0;

        /* Valleyview is definitely limited to scanning out the first
         * 512MiB. Lets presume this behaviour was inherited from the
         * g4x display engine and that all earlier gen are similarly
         * limited. Testing suggests that it is a little more
         * complicated than this. For example, Cherryview appears quite
         * happy to scanout from anywhere within its global aperture.
         */
        if (HAS_GMCH(dev_priv))
                pinctl |= PIN_MAPPABLE;

        vma = i915_gem_object_pin_to_display_plane(obj,
                                                   alignment, view, pinctl);
        if (IS_ERR(vma))
                goto err;

        if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
                int ret;

                /* Install a fence for tiled scan-out. Pre-i965 always needs a
                 * fence, whereas 965+ only requires a fence if using
                 * framebuffer compression.  For simplicity, we always, when
                 * possible, install a fence as the cost is not that onerous.
                 *
                 * If we fail to fence the tiled scanout, then either the
                 * modeset will reject the change (which is highly unlikely as
                 * the affected systems, all but one, do not have unmappable
                 * space) or we will not be able to enable full powersaving
                 * techniques (also likely not to apply due to various limits
                 * FBC and the like impose on the size of the buffer, which
                 * presumably we violated anyway with this unmappable buffer).
                 * Anyway, it is presumably better to stumble onwards with
                 * something and try to run the system in a "less than optimal"
                 * mode that matches the user configuration.
                 */
                ret = i915_vma_pin_fence(vma);
                if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
                        /* Pre-gen4 cannot scan out without a fence; bail. */
                        i915_gem_object_unpin_from_display_plane(vma);
                        vma = ERR_PTR(ret);
                        goto err;
                }

                if (ret == 0 && vma->fence)
                        *out_flags |= PLANE_HAS_FENCE;
        }

        /* Hand the caller its own reference; err path skips this. */
        i915_vma_get(vma);
err:
        atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

        i915_gem_object_unlock(obj);
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
        return vma;
}
2163
/*
 * intel_unpin_fb_vma - undo intel_pin_and_fence_fb_obj()
 * @vma: vma returned by intel_pin_and_fence_fb_obj()
 * @flags: the *out_flags value from the pin (PLANE_HAS_FENCE etc.)
 *
 * Releases the fence (if one was installed), unpins the vma from the
 * display plane, and drops the reference taken at pin time. Caller
 * must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        i915_gem_object_lock(vma->obj);
        if (flags & PLANE_HAS_FENCE)
                i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
        i915_gem_object_unlock(vma->obj);

        i915_vma_put(vma);
}
2176
2177 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2178                           unsigned int rotation)
2179 {
2180         if (drm_rotation_90_or_270(rotation))
2181                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2182         else
2183                 return fb->pitches[color_plane];
2184 }
2185
2186 /*
2187  * Convert the x/y offsets into a linear offset.
2188  * Only valid with 0/180 degree rotation, which is fine since linear
2189  * offset is only used with linear buffers on pre-hsw and tiled buffers
2190  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2191  */
2192 u32 intel_fb_xy_to_linear(int x, int y,
2193                           const struct intel_plane_state *state,
2194                           int color_plane)
2195 {
2196         const struct drm_framebuffer *fb = state->base.fb;
2197         unsigned int cpp = fb->format->cpp[color_plane];
2198         unsigned int pitch = state->color_plane[color_plane].stride;
2199
2200         return y * pitch + x * cpp;
2201 }
2202
2203 /*
2204  * Add the x/y offsets derived from fb->offsets[] to the user
2205  * specified plane src x/y offsets. The resulting x/y offsets
2206  * specify the start of scanout from the beginning of the gtt mapping.
2207  */
2208 void intel_add_fb_offsets(int *x, int *y,
2209                           const struct intel_plane_state *state,
2210                           int color_plane)
2211
2212 {
2213         *x += state->color_plane[color_plane].x;
2214         *y += state->color_plane[color_plane].y;
2215 }
2216
/*
 * Convert the byte distance between two tile-aligned offsets
 * (@old_offset down to @new_offset) into an x/y adjustment, so that
 * scanout from @new_offset plus the adjusted x/y hits the same pixels.
 * Returns @new_offset. All dimensions are in tile units; @x ends up
 * minimized (< one row of pixels).
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
                                    unsigned int tile_width,
                                    unsigned int tile_height,
                                    unsigned int tile_size,
                                    unsigned int pitch_tiles,
                                    u32 old_offset,
                                    u32 new_offset)
{
        unsigned int pitch_pixels = pitch_tiles * tile_width;
        unsigned int tiles;

        /* Both offsets must be tile aligned and must not move forward. */
        WARN_ON(old_offset & (tile_size - 1));
        WARN_ON(new_offset & (tile_size - 1));
        WARN_ON(new_offset > old_offset);

        tiles = (old_offset - new_offset) / tile_size;

        *y += tiles / pitch_tiles * tile_height;
        *x += tiles % pitch_tiles * tile_width;

        /* minimize x in case it got needlessly big */
        *y += *x / pitch_pixels * tile_height;
        *x %= pitch_pixels;

        return new_offset;
}
2243
/* @color_plane is currently unused; only the modifier decides linearity. */
static bool is_surface_linear(u64 modifier, int color_plane)
{
        return modifier == DRM_FORMAT_MOD_LINEAR;
}
2248
/*
 * Move the difference between @old_offset and @new_offset into the
 * x/y offsets, in fb coordinates. Tiled surfaces go through the
 * tile-based adjustment; linear surfaces are handled with plain
 * pitch/cpp arithmetic. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
                                       const struct drm_framebuffer *fb,
                                       int color_plane,
                                       unsigned int rotation,
                                       unsigned int pitch,
                                       u32 old_offset, u32 new_offset)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        WARN_ON(new_offset > old_offset);

        if (!is_surface_linear(fb->modifier, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        /* Rotated view: pitch is in tile rows, swap dims. */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         old_offset, new_offset);
        } else {
                /* Linear: fold the current x/y in, then re-split. */
                old_offset += *y * pitch + *x * cpp;

                *y = (old_offset - new_offset) / pitch;
                *x = ((old_offset - new_offset) - *y * pitch) / cpp;
        }

        return new_offset;
}
2287
2288 /*
2289  * Adjust the tile offset by moving the difference into
2290  * the x/y offsets.
2291  */
2292 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2293                                              const struct intel_plane_state *state,
2294                                              int color_plane,
2295                                              u32 old_offset, u32 new_offset)
2296 {
2297         return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2298                                            state->base.rotation,
2299                                            state->color_plane[color_plane].stride,
2300                                            old_offset, new_offset);
2301 }
2302
2303 /*
2304  * Computes the aligned offset to the base tile and adjusts
2305  * x, y. bytes per pixel is assumed to be a power-of-two.
2306  *
2307  * In the 90/270 rotated case, x and y are assumed
2308  * to be already rotated to match the rotated GTT view, and
2309  * pitch is the tile_height aligned framebuffer height.
2310  *
2311  * This function is used when computing the derived information
2312  * under intel_framebuffer, so using any of that information
2313  * here is not allowed. Anything under drm_framebuffer can be
2314  * used. This is why the user has to pass in the pitch since it
2315  * is specified in the rotated orientation.
2316  */
2317 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2318                                         int *x, int *y,
2319                                         const struct drm_framebuffer *fb,
2320                                         int color_plane,
2321                                         unsigned int pitch,
2322                                         unsigned int rotation,
2323                                         u32 alignment)
2324 {
2325         unsigned int cpp = fb->format->cpp[color_plane];
2326         u32 offset, offset_aligned;
2327
2328         if (alignment)
2329                 alignment--;
2330
2331         if (!is_surface_linear(fb->modifier, color_plane)) {
2332                 unsigned int tile_size, tile_width, tile_height;
2333                 unsigned int tile_rows, tiles, pitch_tiles;
2334
2335                 tile_size = intel_tile_size(dev_priv);
2336                 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2337
2338                 if (drm_rotation_90_or_270(rotation)) {
2339                         pitch_tiles = pitch / tile_height;
2340                         swap(tile_width, tile_height);
2341                 } else {
2342                         pitch_tiles = pitch / (tile_width * cpp);
2343                 }
2344
2345                 tile_rows = *y / tile_height;
2346                 *y %= tile_height;
2347
2348                 tiles = *x / tile_width;
2349                 *x %= tile_width;
2350
2351                 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2352                 offset_aligned = offset & ~alignment;
2353
2354                 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2355                                          tile_size, pitch_tiles,
2356                                          offset, offset_aligned);
2357         } else {
2358                 offset = *y * pitch + *x * cpp;
2359                 offset_aligned = offset & ~alignment;
2360
2361                 *y = (offset & alignment) / pitch;
2362                 *x = ((offset & alignment) - *y * pitch) / cpp;
2363         }
2364
2365         return offset_aligned;
2366 }
2367
2368 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2369                                               const struct intel_plane_state *state,
2370                                               int color_plane)
2371 {
2372         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2373         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2374         const struct drm_framebuffer *fb = state->base.fb;
2375         unsigned int rotation = state->base.rotation;
2376         int pitch = state->color_plane[color_plane].stride;
2377         u32 alignment;
2378
2379         if (intel_plane->id == PLANE_CURSOR)
2380                 alignment = intel_cursor_alignment(dev_priv);
2381         else
2382                 alignment = intel_surf_alignment(fb, color_plane);
2383
2384         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2385                                             pitch, rotation, alignment);
2386 }
2387
2388 /* Convert the fb->offset[] into x/y offsets */
2389 static int intel_fb_offset_to_xy(int *x, int *y,
2390                                  const struct drm_framebuffer *fb,
2391                                  int color_plane)
2392 {
2393         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2394         unsigned int height;
2395
2396         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2397             fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2398                 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2399                               fb->offsets[color_plane], color_plane);
2400                 return -EINVAL;
2401         }
2402
2403         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2404         height = ALIGN(height, intel_tile_height(fb, color_plane));
2405
2406         /* Catch potential overflows early */
2407         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2408                             fb->offsets[color_plane])) {
2409                 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2410                               fb->offsets[color_plane], fb->pitches[color_plane],
2411                               color_plane);
2412                 return -ERANGE;
2413         }
2414
2415         *x = 0;
2416         *y = 0;
2417
2418         intel_adjust_aligned_offset(x, y,
2419                                     fb, color_plane, DRM_MODE_ROTATE_0,
2420                                     fb->pitches[color_plane],
2421                                     fb->offsets[color_plane], 0);
2422
2423         return 0;
2424 }
2425
2426 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2427 {
2428         switch (fb_modifier) {
2429         case I915_FORMAT_MOD_X_TILED:
2430                 return I915_TILING_X;
2431         case I915_FORMAT_MOD_Y_TILED:
2432         case I915_FORMAT_MOD_Y_TILED_CCS:
2433                 return I915_TILING_Y;
2434         default:
2435                 return I915_TILING_NONE;
2436         }
2437 }
2438
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 *
 * Plane 0 is the main surface (4 bytes per pixel), plane 1 is the CCS
 * (1 byte per element); hsub/vsub encode the 8x16 ratio derived above.
 */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2463
2464 static const struct drm_format_info *
2465 lookup_format_info(const struct drm_format_info formats[],
2466                    int num_formats, u32 format)
2467 {
2468         int i;
2469
2470         for (i = 0; i < num_formats; i++) {
2471                 if (formats[i].format == format)
2472                         return &formats[i];
2473         }
2474
2475         return NULL;
2476 }
2477
2478 static const struct drm_format_info *
2479 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2480 {
2481         switch (cmd->modifier[0]) {
2482         case I915_FORMAT_MOD_Y_TILED_CCS:
2483         case I915_FORMAT_MOD_Yf_TILED_CCS:
2484                 return lookup_format_info(ccs_formats,
2485                                           ARRAY_SIZE(ccs_formats),
2486                                           cmd->pixel_format);
2487         default:
2488                 return NULL;
2489         }
2490 }
2491
2492 bool is_ccs_modifier(u64 modifier)
2493 {
2494         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2495                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2496 }
2497
2498 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2499                               u32 pixel_format, u64 modifier)
2500 {
2501         struct intel_crtc *crtc;
2502         struct intel_plane *plane;
2503
2504         /*
2505          * We assume the primary plane for pipe A has
2506          * the highest stride limits of them all.
2507          */
2508         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2509         plane = to_intel_plane(crtc->base.primary);
2510
2511         return plane->max_stride(plane, pixel_format, modifier,
2512                                  DRM_MODE_ROTATE_0);
2513 }
2514
2515 static
2516 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2517                         u32 pixel_format, u64 modifier)
2518 {
2519         /*
2520          * Arbitrary limit for gen4+ chosen to match the
2521          * render engine max stride.
2522          *
2523          * The new CCS hash mode makes remapping impossible
2524          */
2525         if (!is_ccs_modifier(modifier)) {
2526                 if (INTEL_GEN(dev_priv) >= 7)
2527                         return 256*1024;
2528                 else if (INTEL_GEN(dev_priv) >= 4)
2529                         return 128*1024;
2530         }
2531
2532         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2533 }
2534
2535 static u32
2536 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2537 {
2538         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2539
2540         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2541                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2542                                                            fb->format->format,
2543                                                            fb->modifier);
2544
2545                 /*
2546                  * To make remapping with linear generally feasible
2547                  * we need the stride to be page aligned.
2548                  */
2549                 if (fb->pitches[color_plane] > max_stride)
2550                         return intel_tile_size(dev_priv);
2551                 else
2552                         return 64;
2553         } else {
2554                 return intel_tile_width_bytes(fb, color_plane);
2555         }
2556 }
2557
2558 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2559 {
2560         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2561         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2562         const struct drm_framebuffer *fb = plane_state->base.fb;
2563         int i;
2564
2565         /* We don't want to deal with remapping with cursors */
2566         if (plane->id == PLANE_CURSOR)
2567                 return false;
2568
2569         /*
2570          * The display engine limits already match/exceed the
2571          * render engine limits, so not much point in remapping.
2572          * Would also need to deal with the fence POT alignment
2573          * and gen2 2KiB GTT tile size.
2574          */
2575         if (INTEL_GEN(dev_priv) < 4)
2576                 return false;
2577
2578         /*
2579          * The new CCS hash mode isn't compatible with remapping as
2580          * the virtual address of the pages affects the compressed data.
2581          */
2582         if (is_ccs_modifier(fb->modifier))
2583                 return false;
2584
2585         /* Linear needs a page aligned stride for remapping */
2586         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2587                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2588
2589                 for (i = 0; i < fb->format->num_planes; i++) {
2590                         if (fb->pitches[i] & alignment)
2591                                 return false;
2592                 }
2593         }
2594
2595         return true;
2596 }
2597
2598 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2599 {
2600         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2601         const struct drm_framebuffer *fb = plane_state->base.fb;
2602         unsigned int rotation = plane_state->base.rotation;
2603         u32 stride, max_stride;
2604
2605         /*
2606          * No remapping for invisible planes since we don't have
2607          * an actual source viewport to remap.
2608          */
2609         if (!plane_state->base.visible)
2610                 return false;
2611
2612         if (!intel_plane_can_remap(plane_state))
2613                 return false;
2614
2615         /*
2616          * FIXME: aux plane limits on gen9+ are
2617          * unclear in Bspec, for now no checking.
2618          */
2619         stride = intel_fb_pitch(fb, 0, rotation);
2620         max_stride = plane->max_stride(plane, fb->format->format,
2621                                        fb->modifier, rotation);
2622
2623         return stride > max_stride;
2624 }
2625
/*
 * Validate the framebuffer layout and precompute, for each color plane,
 * the first-pixel x/y offsets for both the normal and the rotated GTT
 * views, storing them under intel_framebuffer (normal[], rotated[],
 * rot_info). Also verifies the fb fits inside the backing object.
 * Returns 0 on success or a negative error code.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;	/* running tile offset in the rotated view */
	unsigned int max_size = 0;	/* highest tile index any plane needs */
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Turn the plane's byte offset into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			/* Intra-tile positions, scaled to main surface units. */
			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Tile-align the offset; residue moves back into x/y. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb->modifier, i)) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			/* Describe this plane for the rotated GGTT view. */
			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* Linear: plane size in tiles, rounded up. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* The whole layout must fit inside the backing object. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2790
/*
 * Build a rotated/remapped GGTT view covering only the plane's src
 * viewport, and recompute the per-color-plane stride and x/y offsets
 * relative to that view. Used when the normal fb stride exceeds the
 * hardware plane limit (see intel_plane_needs_remap()).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->base.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;	/* running tile offset within the new view */

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are 16.16 fixed point. */
	src_x = plane_state->base.src.x1 >> 16;
	src_y = plane_state->base.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->base.src) >> 16;
	src_h = drm_rect_height(&plane_state->base.src) >> 16;

	/* CCS can't be remapped; callers must have filtered it out. */
	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->base.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* Plane 0 is never subsampled. */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		/* Viewport origin/size in this plane's units. */
		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		/* Tile-align the offset; residue moves back into x/y. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		/* Describe this plane for the remapped/rotated view. */
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
2903
2904 static int
2905 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2906 {
2907         const struct intel_framebuffer *fb =
2908                 to_intel_framebuffer(plane_state->base.fb);
2909         unsigned int rotation = plane_state->base.rotation;
2910         int i, num_planes;
2911
2912         if (!fb)
2913                 return 0;
2914
2915         num_planes = fb->base.format->num_planes;
2916
2917         if (intel_plane_needs_remap(plane_state)) {
2918                 intel_plane_remap_gtt(plane_state);
2919
2920                 /*
2921                  * Sometimes even remapping can't overcome
2922                  * the stride limitations :( Can happen with
2923                  * big plane sizes and suitably misaligned
2924                  * offsets.
2925                  */
2926                 return intel_plane_check_stride(plane_state);
2927         }
2928
2929         intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2930
2931         for (i = 0; i < num_planes; i++) {
2932                 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2933                 plane_state->color_plane[i].offset = 0;
2934
2935                 if (drm_rotation_90_or_270(rotation)) {
2936                         plane_state->color_plane[i].x = fb->rotated[i].x;
2937                         plane_state->color_plane[i].y = fb->rotated[i].y;
2938                 } else {
2939                         plane_state->color_plane[i].x = fb->normal[i].x;
2940                         plane_state->color_plane[i].y = fb->normal[i].y;
2941                 }
2942         }
2943
2944         /* Rotate src coordinates to match rotated GTT view */
2945         if (drm_rotation_90_or_270(rotation))
2946                 drm_rect_rotate(&plane_state->base.src,
2947                                 fb->base.width << 16, fb->base.height << 16,
2948                                 DRM_MODE_ROTATE_270);
2949
2950         return intel_plane_check_stride(plane_state);
2951 }
2952
2953 static int i9xx_format_to_fourcc(int format)
2954 {
2955         switch (format) {
2956         case DISPPLANE_8BPP:
2957                 return DRM_FORMAT_C8;
2958         case DISPPLANE_BGRX555:
2959                 return DRM_FORMAT_XRGB1555;
2960         case DISPPLANE_BGRX565:
2961                 return DRM_FORMAT_RGB565;
2962         default:
2963         case DISPPLANE_BGRX888:
2964                 return DRM_FORMAT_XRGB8888;
2965         case DISPPLANE_RGBX888:
2966                 return DRM_FORMAT_XBGR8888;
2967         case DISPPLANE_BGRX101010:
2968                 return DRM_FORMAT_XRGB2101010;
2969         case DISPPLANE_RGBX101010:
2970                 return DRM_FORMAT_XBGR2101010;
2971         }
2972 }
2973
2974 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2975 {
2976         switch (format) {
2977         case PLANE_CTL_FORMAT_RGB_565:
2978                 return DRM_FORMAT_RGB565;
2979         case PLANE_CTL_FORMAT_NV12:
2980                 return DRM_FORMAT_NV12;
2981         case PLANE_CTL_FORMAT_P010:
2982                 return DRM_FORMAT_P010;
2983         case PLANE_CTL_FORMAT_P012:
2984                 return DRM_FORMAT_P012;
2985         case PLANE_CTL_FORMAT_P016:
2986                 return DRM_FORMAT_P016;
2987         case PLANE_CTL_FORMAT_Y210:
2988                 return DRM_FORMAT_Y210;
2989         case PLANE_CTL_FORMAT_Y212:
2990                 return DRM_FORMAT_Y212;
2991         case PLANE_CTL_FORMAT_Y216:
2992                 return DRM_FORMAT_Y216;
2993         case PLANE_CTL_FORMAT_Y410:
2994                 return DRM_FORMAT_XVYU2101010;
2995         case PLANE_CTL_FORMAT_Y412:
2996                 return DRM_FORMAT_XVYU12_16161616;
2997         case PLANE_CTL_FORMAT_Y416:
2998                 return DRM_FORMAT_XVYU16161616;
2999         default:
3000         case PLANE_CTL_FORMAT_XRGB_8888:
3001                 if (rgb_order) {
3002                         if (alpha)
3003                                 return DRM_FORMAT_ABGR8888;
3004                         else
3005                                 return DRM_FORMAT_XBGR8888;
3006                 } else {
3007                         if (alpha)
3008                                 return DRM_FORMAT_ARGB8888;
3009                         else
3010                                 return DRM_FORMAT_XRGB8888;
3011                 }
3012         case PLANE_CTL_FORMAT_XRGB_2101010:
3013                 if (rgb_order)
3014                         return DRM_FORMAT_XBGR2101010;
3015                 else
3016                         return DRM_FORMAT_XRGB2101010;
3017         case PLANE_CTL_FORMAT_XRGB_16161616F:
3018                 if (rgb_order) {
3019                         if (alpha)
3020                                 return DRM_FORMAT_ABGR16161616F;
3021                         else
3022                                 return DRM_FORMAT_XBGR16161616F;
3023                 } else {
3024                         if (alpha)
3025                                 return DRM_FORMAT_ARGB16161616F;
3026                         else
3027                                 return DRM_FORMAT_XRGB16161616F;
3028                 }
3029         }
3030 }
3031
/*
 * Try to wrap the BIOS/GOP-programmed framebuffer (described by
 * @plane_config) in a GEM object backed by the preallocated stolen
 * memory range, and initialize plane_config->fb around it.
 *
 * Returns true on success. On failure (zero size, fb too large,
 * unsupported modifier, stolen allocation failure, or fb init failure)
 * returns false and the caller must fall back to sharing another
 * CRTC's fb or disabling the plane.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* Stolen objects must be page aligned; widen the BIOS range to pages. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	/* Only these modifiers can be reconstructed from BIOS state. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
				 fb->modifier);
		return false;
	}

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	/* Mirror the BIOS tiling mode into the object's tiling state. */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto out;
	}

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	ret = true;
out:
	/*
	 * Drop our reference in both cases; on success the framebuffer
	 * presumably holds its own reference to obj — verify against
	 * intel_framebuffer_init().
	 */
	i915_gem_object_put(obj);
	return ret;
}
3108
3109 static void
3110 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3111                         struct intel_plane_state *plane_state,
3112                         bool visible)
3113 {
3114         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3115
3116         plane_state->base.visible = visible;
3117
3118         if (visible)
3119                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
3120         else
3121                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
3122 }
3123
3124 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3125 {
3126         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3127         struct drm_plane *plane;
3128
3129         /*
3130          * Active_planes aliases if multiple "primary" or cursor planes
3131          * have been used on the same (or wrong) pipe. plane_mask uses
3132          * unique ids, hence we can use that to reconstruct active_planes.
3133          */
3134         crtc_state->active_planes = 0;
3135
3136         drm_for_each_plane_mask(plane, &dev_priv->drm,
3137                                 crtc_state->base.plane_mask)
3138                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3139 }
3140
3141 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3142                                          struct intel_plane *plane)
3143 {
3144         struct intel_crtc_state *crtc_state =
3145                 to_intel_crtc_state(crtc->base.state);
3146         struct intel_plane_state *plane_state =
3147                 to_intel_plane_state(plane->base.state);
3148
3149         DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3150                       plane->base.base.id, plane->base.name,
3151                       crtc->base.base.id, crtc->base.name);
3152
3153         intel_set_plane_visible(crtc_state, plane_state, false);
3154         fixup_active_planes(crtc_state);
3155         crtc_state->data_rate[plane->id] = 0;
3156
3157         if (plane->id == PLANE_PRIMARY)
3158                 intel_pre_disable_primary_noatomic(&crtc->base);
3159
3160         intel_disable_plane(plane, crtc_state);
3161 }
3162
3163 static struct intel_frontbuffer *
3164 to_intel_frontbuffer(struct drm_framebuffer *fb)
3165 {
3166         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3167 }
3168
/*
 * Take over the framebuffer the BIOS left enabled on @intel_crtc's
 * primary plane. Try, in order: wrapping the BIOS allocation in a
 * stolen-memory object, sharing an fb already reconstructed for another
 * active CRTC at the same GGTT address, and finally disabling the
 * plane if neither works.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	/* Nothing to take over if the BIOS left no fb behind. */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address => BIOS pointed both pipes at one fb. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->base.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	/* Pin the fb so scanout keeps working while we take over. */
	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* Full-fb src/dst rectangles; src is in 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3275
3276 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3277                                int color_plane,
3278                                unsigned int rotation)
3279 {
3280         int cpp = fb->format->cpp[color_plane];
3281
3282         switch (fb->modifier) {
3283         case DRM_FORMAT_MOD_LINEAR:
3284         case I915_FORMAT_MOD_X_TILED:
3285                 /*
3286                  * Validated limit is 4k, but has 5k should
3287                  * work apart from the following features:
3288                  * - Ytile (already limited to 4k)
3289                  * - FP16 (already limited to 4k)
3290                  * - render compression (already limited to 4k)
3291                  * - KVMR sprite and cursor (don't care)
3292                  * - horizontal panning (TODO verify this)
3293                  * - pipe and plane scaling (TODO verify this)
3294                  */
3295                 if (cpp == 8)
3296                         return 4096;
3297                 else
3298                         return 5120;
3299         case I915_FORMAT_MOD_Y_TILED_CCS:
3300         case I915_FORMAT_MOD_Yf_TILED_CCS:
3301                 /* FIXME AUX plane? */
3302         case I915_FORMAT_MOD_Y_TILED:
3303         case I915_FORMAT_MOD_Yf_TILED:
3304                 if (cpp == 8)
3305                         return 2048;
3306                 else
3307                         return 4096;
3308         default:
3309                 MISSING_CASE(fb->modifier);
3310                 return 2048;
3311         }
3312 }
3313
3314 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3315                                int color_plane,
3316                                unsigned int rotation)
3317 {
3318         int cpp = fb->format->cpp[color_plane];
3319
3320         switch (fb->modifier) {
3321         case DRM_FORMAT_MOD_LINEAR:
3322         case I915_FORMAT_MOD_X_TILED:
3323                 if (cpp == 8)
3324                         return 4096;
3325                 else
3326                         return 5120;
3327         case I915_FORMAT_MOD_Y_TILED_CCS:
3328         case I915_FORMAT_MOD_Yf_TILED_CCS:
3329                 /* FIXME AUX plane? */
3330         case I915_FORMAT_MOD_Y_TILED:
3331         case I915_FORMAT_MOD_Yf_TILED:
3332                 if (cpp == 8)
3333                         return 2048;
3334                 else
3335                         return 5120;
3336         default:
3337                 MISSING_CASE(fb->modifier);
3338                 return 2048;
3339         }
3340 }
3341
/*
 * Max source width (pixels) for a plane on ICL+; a flat limit
 * regardless of modifier or cpp. Parameters are kept so the signature
 * matches the skl/glk variants selected at runtime.
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3348
/* Max source height (pixels) for a plane on pre-ICL (gen9/10). */
static int skl_max_plane_height(void)
{
	return 4096;
}
3353
/* Max source height (pixels) for a plane on ICL+ (gen11). */
static int icl_max_plane_height(void)
{
	return 4320;
}
3358
/*
 * Walk the CCS AUX surface offset backwards (one alignment step at a
 * time) until its x/y match the main surface's (@main_x, @main_y), since
 * the AUX surface has no independent x/y offsets of its own.
 *
 * Returns true and commits the new AUX offset/x/y into color_plane[1]
 * on success; returns false if no matching placement was found.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	/* AUX offset must stay >= main offset and can't walk below y=main_y. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Step back one alignment unit, in subsampled coordinates. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		/* Restore the sub-sample remainder lost by the division. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
3396
/*
 * Validate and finalize the main (Y/RGB) surface placement for a SKL+
 * plane: enforce per-platform size limits, compute an aligned surface
 * offset, and work around X-tiling and CCS placement constraints.
 *
 * On success, writes the final offset/x/y into color_plane[0] and
 * translates plane_state->base.src to match. Returns 0 or -EINVAL.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	/* src rectangle is 16.16 fixed point; convert to integer pixels. */
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	/* Pick the per-generation source size limits. */
	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset back until x + w fits within the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate(&plane_state->base.src,
			   (x << 16) - plane_state->base.src.x1,
			   (y << 16) - plane_state->base.src.y1);

	return 0;
}
3494
/*
 * Compute the placement of the chroma (CbCr) plane of a semiplanar
 * YUV fb and store it in color_plane[1]. Returns 0 or -EINVAL.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	/*
	 * src is 16.16 fixed point; the extra shift halves the value to
	 * convert luma coordinates to the 2x-subsampled chroma plane.
	 */
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3523
3524 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3525 {
3526         const struct drm_framebuffer *fb = plane_state->base.fb;
3527         int src_x = plane_state->base.src.x1 >> 16;
3528         int src_y = plane_state->base.src.y1 >> 16;
3529         int hsub = fb->format->hsub;
3530         int vsub = fb->format->vsub;
3531         int x = src_x / hsub;
3532         int y = src_y / vsub;
3533         u32 offset;
3534
3535         intel_add_fb_offsets(&x, &y, plane_state, 1);
3536         offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3537
3538         plane_state->color_plane[1].offset = offset;
3539         plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3540         plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3541
3542         return 0;
3543 }
3544
3545 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3546 {
3547         const struct drm_framebuffer *fb = plane_state->base.fb;
3548         int ret;
3549
3550         ret = intel_plane_compute_gtt(plane_state);
3551         if (ret)
3552                 return ret;
3553
3554         if (!plane_state->base.visible)
3555                 return 0;
3556
3557         /*
3558          * Handle the AUX surface first since
3559          * the main surface setup depends on it.
3560          */
3561         if (drm_format_info_is_yuv_semiplanar(fb->format)) {
3562                 ret = skl_check_nv12_aux_surface(plane_state);
3563                 if (ret)
3564                         return ret;
3565         } else if (is_ccs_modifier(fb->modifier)) {
3566                 ret = skl_check_ccs_aux_surface(plane_state);
3567                 if (ret)
3568                         return ret;
3569         } else {
3570                 plane_state->color_plane[1].offset = ~0xfff;
3571                 plane_state->color_plane[1].x = 0;
3572                 plane_state->color_plane[1].y = 0;
3573         }
3574
3575         ret = skl_check_main_surface(plane_state);
3576         if (ret)
3577                 return ret;
3578
3579         return 0;
3580 }
3581
3582 unsigned int
3583 i9xx_plane_max_stride(struct intel_plane *plane,
3584                       u32 pixel_format, u64 modifier,
3585                       unsigned int rotation)
3586 {
3587         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3588
3589         if (!HAS_GMCH(dev_priv)) {
3590                 return 32*1024;
3591         } else if (INTEL_GEN(dev_priv) >= 4) {
3592                 if (modifier == I915_FORMAT_MOD_X_TILED)
3593                         return 16*1024;
3594                 else
3595                         return 32*1024;
3596         } else if (INTEL_GEN(dev_priv) >= 3) {
3597                 if (modifier == I915_FORMAT_MOD_X_TILED)
3598                         return 8*1024;
3599                 else
3600                         return 16*1024;
3601         } else {
3602                 if (plane->i9xx_plane == PLANE_C)
3603                         return 4*1024;
3604                 else
3605                         return 8*1024;
3606         }
3607 }
3608
3609 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3610 {
3611         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3612         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3613         u32 dspcntr = 0;
3614
3615         if (crtc_state->gamma_enable)
3616                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
3617
3618         if (crtc_state->csc_enable)
3619                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3620
3621         if (INTEL_GEN(dev_priv) < 5)
3622                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3623
3624         return dspcntr;
3625 }
3626
/*
 * Build the DSPCNTR value for a pre-SKL primary plane from the plane
 * state: pixel format, tiling and rotation/reflection bits.
 * Returns 0 (and logs) for an unhandled pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	/* Trickle feed disable only applies on these platforms. */
	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Map the DRM fourcc onto the hardware pixel format field. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* X-tiling in DSPCNTR exists only from gen4 onwards. */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
3681
/*
 * Compute the final surface offset and x/y for a pre-SKL primary
 * plane, including the rotation/reflection coordinate adjustment that
 * pre-HSW hardware needs done in software. Returns 0 or a negative
 * error code from intel_plane_compute_gtt().
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->base.visible)
		return 0;

	/* src is 16.16 fixed point. */
	src_x = plane_state->base.src.x1 >> 16;
	src_y = plane_state->base.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* Gen < 4 has no surface offset register; DSPADDR is absolute. */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate(&plane_state->base.src,
			   (src_x << 16) - plane_state->base.src.x1,
			   (src_y << 16) - plane_state->base.src.y1);

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->base.rotation;
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/* Point at the last pixel the hardware will scan out first. */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3736
3737 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
3738 {
3739         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3740         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3741
3742         if (IS_CHERRYVIEW(dev_priv))
3743                 return i9xx_plane == PLANE_B;
3744         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3745                 return false;
3746         else if (IS_GEN(dev_priv, 4))
3747                 return i9xx_plane == PLANE_C;
3748         else
3749                 return i9xx_plane == PLANE_B ||
3750                         i9xx_plane == PLANE_C;
3751 }
3752
3753 static int
3754 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3755                  struct intel_plane_state *plane_state)
3756 {
3757         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3758         int ret;
3759
3760         ret = chv_plane_check_rotation(plane_state);
3761         if (ret)
3762                 return ret;
3763
3764         ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3765                                                   &crtc_state->base,
3766                                                   DRM_PLANE_HELPER_NO_SCALING,
3767                                                   DRM_PLANE_HELPER_NO_SCALING,
3768                                                   i9xx_plane_has_windowing(plane),
3769                                                   true);
3770         if (ret)
3771                 return ret;
3772
3773         ret = i9xx_check_plane_surface(plane_state);
3774         if (ret)
3775                 return ret;
3776
3777         if (!plane_state->base.visible)
3778                 return 0;
3779
3780         ret = intel_plane_check_src_coordinates(plane_state);
3781         if (ret)
3782                 return ret;
3783
3784         plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3785
3786         return 0;
3787 }
3788
/*
 * Program a pre-SKL primary plane's registers from precomputed state.
 * All register writes happen under the uncore lock, with DSPCNTR
 * written immediately before the surface address so the enable latches
 * atomically with the new surface.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->base.dst.x1;
	int crtc_y = plane_state->base.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->base.dst);
	int crtc_h = drm_rect_height(&plane_state->base.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine the per-plane and per-CRTC control bits. */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Gen < 4 has no tiled surface offset; address linearly. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary has its own windowing registers. */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_h - 1) << 16) | (crtc_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3859
/*
 * Disable a pre-skl primary plane.
 *
 * Note that DSPCNTR is not simply written to zero: the pipe
 * gamma/csc bits must stay configured (see comment below), so
 * the crtc-level bits are recomputed and written back.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	/*
	 * Write the surface/address register last, mirroring the
	 * enable path where the DSPCNTR write is armed by the
	 * subsequent surface register write.
	 */
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3890
/*
 * Read back the current hardware state of a pre-skl primary plane.
 *
 * Returns true if the plane is enabled in hardware. On success *pipe
 * is set to the pipe the plane is currently attached to; if the pipe
 * power well is off this returns false without writing *pipe.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
                                    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Pre-ilk hardware carries the pipe selection in DSPCNTR itself. */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3925
3926 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3927 {
3928         struct drm_device *dev = intel_crtc->base.dev;
3929         struct drm_i915_private *dev_priv = to_i915(dev);
3930
3931         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3932         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3933         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3934 }
3935
3936 /*
3937  * This function detaches (aka. unbinds) unused scalers in hardware
3938  */
3939 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3940 {
3941         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3942         const struct intel_crtc_scaler_state *scaler_state =
3943                 &crtc_state->scaler_state;
3944         int i;
3945
3946         /* loop through and disable scalers that aren't in use */
3947         for (i = 0; i < intel_crtc->num_scalers; i++) {
3948                 if (!scaler_state->scalers[i].in_use)
3949                         skl_detach_scaler(intel_crtc, i);
3950         }
3951 }
3952
3953 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3954                                           int color_plane, unsigned int rotation)
3955 {
3956         /*
3957          * The stride is either expressed as a multiple of 64 bytes chunks for
3958          * linear buffers or in number of tiles for tiled buffers.
3959          */
3960         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3961                 return 64;
3962         else if (drm_rotation_90_or_270(rotation))
3963                 return intel_tile_height(fb, color_plane);
3964         else
3965                 return intel_tile_width_bytes(fb, color_plane);
3966 }
3967
3968 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3969                      int color_plane)
3970 {
3971         const struct drm_framebuffer *fb = plane_state->base.fb;
3972         unsigned int rotation = plane_state->base.rotation;
3973         u32 stride = plane_state->color_plane[color_plane].stride;
3974
3975         if (color_plane >= fb->format->num_planes)
3976                 return 0;
3977
3978         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3979 }
3980
/*
 * Translate a DRM fourcc pixel format into the corresponding
 * PLANE_CTL format field for skl+ universal planes. An unhandled
 * format trips MISSING_CASE and yields 0; the caller is expected
 * to have validated the format against the plane's format list.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4038
4039 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4040 {
4041         if (!plane_state->base.fb->format->has_alpha)
4042                 return PLANE_CTL_ALPHA_DISABLE;
4043
4044         switch (plane_state->base.pixel_blend_mode) {
4045         case DRM_MODE_BLEND_PIXEL_NONE:
4046                 return PLANE_CTL_ALPHA_DISABLE;
4047         case DRM_MODE_BLEND_PREMULTI:
4048                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4049         case DRM_MODE_BLEND_COVERAGE:
4050                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4051         default:
4052                 MISSING_CASE(plane_state->base.pixel_blend_mode);
4053                 return PLANE_CTL_ALPHA_DISABLE;
4054         }
4055 }
4056
4057 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4058 {
4059         if (!plane_state->base.fb->format->has_alpha)
4060                 return PLANE_COLOR_ALPHA_DISABLE;
4061
4062         switch (plane_state->base.pixel_blend_mode) {
4063         case DRM_MODE_BLEND_PIXEL_NONE:
4064                 return PLANE_COLOR_ALPHA_DISABLE;
4065         case DRM_MODE_BLEND_PREMULTI:
4066                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4067         case DRM_MODE_BLEND_COVERAGE:
4068                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4069         default:
4070                 MISSING_CASE(plane_state->base.pixel_blend_mode);
4071                 return PLANE_COLOR_ALPHA_DISABLE;
4072         }
4073 }
4074
/*
 * Translate a framebuffer modifier into the PLANE_CTL tiling (and,
 * for the CCS modifiers, render decompression) bits. Linear gets 0;
 * an unknown modifier trips MISSING_CASE and also yields 0.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4096
4097 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4098 {
4099         switch (rotate) {
4100         case DRM_MODE_ROTATE_0:
4101                 break;
4102         /*
4103          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4104          * while i915 HW rotation is clockwise, thats why this swapping.
4105          */
4106         case DRM_MODE_ROTATE_90:
4107                 return PLANE_CTL_ROTATE_270;
4108         case DRM_MODE_ROTATE_180:
4109                 return PLANE_CTL_ROTATE_180;
4110         case DRM_MODE_ROTATE_270:
4111                 return PLANE_CTL_ROTATE_90;
4112         default:
4113                 MISSING_CASE(rotate);
4114         }
4115
4116         return 0;
4117 }
4118
4119 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4120 {
4121         switch (reflect) {
4122         case 0:
4123                 break;
4124         case DRM_MODE_REFLECT_X:
4125                 return PLANE_CTL_FLIP_HORIZONTAL;
4126         case DRM_MODE_REFLECT_Y:
4127         default:
4128                 MISSING_CASE(reflect);
4129         }
4130
4131         return 0;
4132 }
4133
4134 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4135 {
4136         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4137         u32 plane_ctl = 0;
4138
4139         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4140                 return plane_ctl;
4141
4142         if (crtc_state->gamma_enable)
4143                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4144
4145         if (crtc_state->csc_enable)
4146                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4147
4148         return plane_ctl;
4149 }
4150
/*
 * Compute the per-plane PLANE_CTL value for a skl+ universal plane
 * from the committed crtc and plane state. Crtc-level (pipe
 * gamma/csc) bits are added separately via skl_plane_ctl_crtc().
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/*
	 * On glk+ the alpha blending, plane gamma and YUV->RGB CSC
	 * setup live in PLANE_COLOR_CTL instead (see
	 * glk_plane_color_ctl()).
	 */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Reflection (horizontal flip) is only handled on gen10+ */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	/* Destination keying takes precedence over source keying. */
	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4189
4190 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4191 {
4192         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4193         u32 plane_color_ctl = 0;
4194
4195         if (INTEL_GEN(dev_priv) >= 11)
4196                 return plane_color_ctl;
4197
4198         if (crtc_state->gamma_enable)
4199                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4200
4201         if (crtc_state->csc_enable)
4202                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4203
4204         return plane_color_ctl;
4205 }
4206
/*
 * Compute the per-plane PLANE_COLOR_CTL value for glk+ planes:
 * plane gamma disable, alpha blend mode and YUV->RGB CSC selection.
 * Crtc-level bits are added by glk_plane_color_ctl_crtc().
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		/* Pick the fixed-function YUV->RGB CSC matrix. */
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* icl+ HDR planes use the programmable input CSC instead. */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4233
/*
 * Re-commit a previously duplicated atomic state after the display
 * hardware state has been (re)taken over, e.g. when recovering from
 * a GPU reset.
 *
 * @state may be NULL, in which case only the hw state takeover is
 * performed. Returns 0 on success or a negative error code from the
 * commit; -EDEADLK is unexpected at this point and triggers a WARN.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
4272
4273 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4274 {
4275         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4276                 intel_has_gpu_reset(&dev_priv->gt));
4277 }
4278
/*
 * Prepare the display for a GPU reset: gracefully disable all crtcs
 * and stash the duplicated atomic state so intel_finish_reset() can
 * restore it afterwards.
 *
 * On success the modeset locks are intentionally left held (and
 * I915_RESET_MODESET set) until intel_finish_reset() runs.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until no deadlock backoff is needed. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4335
/*
 * Counterpart of intel_prepare_reset(): restore the display state
 * stashed before the GPU reset, then drop the modeset locks taken
 * there and clear I915_RESET_MODESET.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts lost across the reset. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4386
/*
 * Apply the icl PIPE_CHICKEN workaround bits for this crtc's pipe
 * (read-modify-write, so other bits in the register are preserved).
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = I915_READ(PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
4410
/*
 * Update pipe source size and panel fitter state for a fastset
 * (flip without a full modeset), keeping the scanout geometry in
 * sync with the new crtc state.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC encodes (width - 1) in the high half, (height - 1) low. */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	/* Reapply icl chicken-bit workarounds after the pipe update. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
4449
/*
 * Switch the FDI TX and RX on this crtc's pipe from a training
 * pattern to normal pixel transmission, after link training has
 * completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4490
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs the two-stage FDI training sequence: pattern 1 until the RX
 * reports bit lock, then pattern 2 until symbol lock. Failures are
 * reported via DRM_ERROR but the sequence carries on regardless.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll (up to 5 times) for bit lock; ack it by writing it back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll (up to 5 times) for symbol lock; ack it by writing it back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4584
/*
 * FDI TX voltage-swing/pre-emphasis settings tried in order during
 * SNB-B link training (each entry replaces the bits under
 * FDI_LINK_TRAIN_VOL_EMP_MASK).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4591
4592 /* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
                                const struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 temp, i, retry;

        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* enable CPU FDI TX and PCH FDI RX */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_DP_PORT_WIDTH_MASK;
        temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);

        I915_WRITE(FDI_RX_MISC(pipe),
                   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

        /* CPT PCH uses its own training-pattern field in FDI_RX_CTL */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev_priv)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
        }
        I915_WRITE(reg, temp | FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(150);

        /*
         * Step through the vswing/pre-emphasis table, polling the RX IIR
         * up to five times per setting, until bit lock is reported.
         */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                for (retry = 0; retry < 5; retry++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
                        if (temp & FDI_RX_BIT_LOCK) {
                                /* ack the sticky status bit */
                                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                                DRM_DEBUG_KMS("FDI train 1 done.\n");
                                break;
                        }
                        udelay(50);
                }
                if (retry < 5)
                        break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 1 fail!\n");

        /* Train 2 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        if (IS_GEN(dev_priv, 6)) {
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                /* SNB-B */
                temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        }
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev_priv)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_2;
        }
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        /* Same retry scheme as train 1, but waiting for symbol lock. */
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(500);

                for (retry = 0; retry < 5; retry++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
                        if (temp & FDI_RX_SYMBOL_LOCK) {
                                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                                DRM_DEBUG_KMS("FDI train 2 done.\n");
                                break;
                        }
                        udelay(50);
                }
                if (retry < 5)
                        break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 2 fail!\n");

        DRM_DEBUG_KMS("FDI train done.\n");
}
4724
4725 /* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
                                      const struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 temp, i, j;

        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
        reg = FDI_RX_IMR(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(150);

        DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
                      I915_READ(FDI_RX_IIR(pipe)));

        /* Try each vswing and preemphasis setting twice before moving on */
        for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
                /* disable first in case we need to retry */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
                temp &= ~FDI_TX_ENABLE;
                I915_WRITE(reg, temp);

                reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_AUTO;
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp &= ~FDI_RX_ENABLE;
                I915_WRITE(reg, temp);

                /* enable CPU FDI TX and PCH FDI RX */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_DP_PORT_WIDTH_MASK;
                temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
                temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                /* j/2: each table entry is used for two consecutive tries */
                temp |= snb_b_fdi_train_param[j/2];
                temp |= FDI_COMPOSITE_SYNC;
                I915_WRITE(reg, temp | FDI_TX_ENABLE);

                I915_WRITE(FDI_RX_MISC(pipe),
                           FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

                reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
                temp |= FDI_COMPOSITE_SYNC;
                I915_WRITE(reg, temp | FDI_RX_ENABLE);

                POSTING_READ(reg);
                udelay(1); /* should be 0.5us */

                for (i = 0; i < 4; i++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                        /* re-sample IIR in case the lock bit set after the
                         * first read */
                        if (temp & FDI_RX_BIT_LOCK ||
                            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
                                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                                DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
                                              i);
                                break;
                        }
                        udelay(1); /* should be 0.5us */
                }
                if (i == 4) {
                        DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
                        continue;
                }

                /* Train 2 */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_NONE_IVB;
                temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
                I915_WRITE(reg, temp);

                reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
                I915_WRITE(reg, temp);

                POSTING_READ(reg);
                udelay(2); /* should be 1.5us */

                for (i = 0; i < 4; i++) {
                        reg = FDI_RX_IIR(pipe);
                        temp = I915_READ(reg);
                        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

                        if (temp & FDI_RX_SYMBOL_LOCK ||
                            (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
                                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                                DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
                                              i);
                                goto train_done;
                        }
                        udelay(2); /* should be 1.5us */
                }
                if (i == 4)
                        DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
        }

train_done:
        DRM_DEBUG_KMS("FDI train done.\n");
}
4844
/* Enable the PCH FDI RX PLL and the CPU FDI TX PLL for this pipe. */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        enum pipe pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;

        /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
        temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
        /* copy the pipe's BPC field into FDI RX bits 18:16 */
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

        POSTING_READ(reg);
        udelay(200);

        /* Switch from Rawclk to PCDclk */
        temp = I915_READ(reg);
        I915_WRITE(reg, temp | FDI_PCDCLK);

        POSTING_READ(reg);
        udelay(200);

        /* Enable CPU FDI TX PLL, always on for Ironlake */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        if ((temp & FDI_TX_PLL_ENABLE) == 0) {
                I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

                POSTING_READ(reg);
                udelay(100);
        }
}
4881
/*
 * Disable the FDI PLLs: switch RX back to Rawclk, then turn off the
 * CPU TX PLL and the PCH RX PLL (reverse of ironlake_fdi_pll_enable()).
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;

        /* Switch from PCDclk to Rawclk */
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_PCDCLK);

        /* Disable CPU FDI TX PLL */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

        POSTING_READ(reg);
        udelay(100);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

        /* Wait for the clocks to turn off. */
        POSTING_READ(reg);
        udelay(100);
}
4911
/*
 * Disable the FDI link: turn off CPU TX and PCH RX, then leave both
 * sides parked in training pattern 1 with the BPC field kept in sync
 * with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;

        /* disable CPU FDI tx and PCH FDI rx */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
        POSTING_READ(reg);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(0x7 << 16);
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

        POSTING_READ(reg);
        udelay(100);

        /* Ironlake workaround, disable clock pointer after downing FDI */
        if (HAS_PCH_IBX(dev_priv))
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

        /* still set train pattern 1 */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
        I915_WRITE(reg, temp);

        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev_priv)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
        }
        /* BPC in FDI rx is consistent with that in PIPECONF */
        temp &= ~(0x07 << 16);
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(100);
}
4964
4965 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4966 {
4967         struct drm_crtc *crtc;
4968         bool cleanup_done;
4969
4970         drm_for_each_crtc(crtc, &dev_priv->drm) {
4971                 struct drm_crtc_commit *commit;
4972                 spin_lock(&crtc->commit_lock);
4973                 commit = list_first_entry_or_null(&crtc->commit_list,
4974                                                   struct drm_crtc_commit, commit_entry);
4975                 cleanup_done = commit ?
4976                         try_wait_for_completion(&commit->cleanup_done) : true;
4977                 spin_unlock(&crtc->commit_lock);
4978
4979                 if (cleanup_done)
4980                         continue;
4981
4982                 drm_crtc_wait_one_vblank(crtc);
4983
4984                 return true;
4985         }
4986
4987         return false;
4988 }
4989
/*
 * Disable the LPT iCLKIP clock: gate the pixel clock, then set the
 * SSC control disable bit through the sideband interface.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
        u32 temp;

        I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

        /* sideband accesses are serialized by sb_lock */
        mutex_lock(&dev_priv->sb_lock);

        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp |= SBI_SSCCTL_DISABLE;
        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
5004
5005 /* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int clock = crtc_state->base.adjusted_mode.crtc_clock;
        u32 divsel, phaseinc, auxdiv, phasedir = 0;
        u32 temp;

        /* disable before reprogramming; re-enabled at the end */
        lpt_disable_iclkip(dev_priv);

        /* The iCLK virtual clock root frequency is in MHz,
         * but the adjusted_mode->crtc_clock is in KHz. To get the
         * divisors, it is necessary to divide one by another, so we
         * convert the virtual clock precision to KHz here for higher
         * precision.
         */
        for (auxdiv = 0; auxdiv < 2; auxdiv++) {
                u32 iclk_virtual_root_freq = 172800 * 1000;
                u32 iclk_pi_range = 64;
                u32 desired_divisor;

                desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                                    clock << auxdiv);
                divsel = (desired_divisor / iclk_pi_range) - 2;
                phaseinc = desired_divisor % iclk_pi_range;

                /*
                 * Near 20MHz is a corner case which is
                 * out of range for the 7-bit divisor
                 */
                if (divsel <= 0x7f)
                        break;
        }

        /* This should not happen with any sane values */
        WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
                ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
        WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
                ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

        DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
                        clock,
                        auxdiv,
                        divsel,
                        phasedir,
                        phaseinc);

        mutex_lock(&dev_priv->sb_lock);

        /* Program SSCDIVINTPHASE6 */
        temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
        temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
        temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
        temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
        temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
        temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

        /* Program SSCAUXDIV */
        temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
        temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
        intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

        /* Enable modulator and associated divider */
        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp &= ~SBI_SSCCTL_DISABLE;
        intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);

        /* Wait for initialization time */
        udelay(24);

        I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5083
/*
 * Read back the currently programmed iCLKIP frequency (in KHz, the
 * inverse of lpt_program_iclkip()'s divisor computation). Returns 0
 * when the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
        u32 divsel, phaseinc, auxdiv;
        u32 iclk_virtual_root_freq = 172800 * 1000;
        u32 iclk_pi_range = 64;
        u32 desired_divisor;
        u32 temp;

        if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
                return 0;

        mutex_lock(&dev_priv->sb_lock);

        temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        if (temp & SBI_SSCCTL_DISABLE) {
                mutex_unlock(&dev_priv->sb_lock);
                return 0;
        }

        temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
                SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
        phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
                SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

        temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
                SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

        mutex_unlock(&dev_priv->sb_lock);

        desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

        return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
                                 desired_divisor << auxdiv);
}
5120
/*
 * Copy the CPU transcoder's h/v timing registers into the given PCH
 * transcoder so both sides of the FDI link agree on the mode timings.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
                                                enum pipe pch_transcoder)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
                   I915_READ(HTOTAL(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
                   I915_READ(HBLANK(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
                   I915_READ(HSYNC(cpu_transcoder)));

        I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
                   I915_READ(VTOTAL(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
                   I915_READ(VBLANK(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
                   I915_READ(VSYNC(cpu_transcoder)));
        I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
                   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
5144
/*
 * Toggle the FDI B/C lane bifurcation chicken bit. Must only be
 * changed while FDI RX on both pipes B and C is disabled (WARNed on
 * below); no-op if the bit already matches @enable.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
        u32 temp;

        temp = I915_READ(SOUTH_CHICKEN1);
        if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
                return;

        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

        temp &= ~FDI_BC_BIFURCATION_SELECT;
        if (enable)
                temp |= FDI_BC_BIFURCATION_SELECT;

        DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
        I915_WRITE(SOUTH_CHICKEN1, temp);
        POSTING_READ(SOUTH_CHICKEN1);
}
5164
5165 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5166 {
5167         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5168         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5169
5170         switch (crtc->pipe) {
5171         case PIPE_A:
5172                 break;
5173         case PIPE_B:
5174                 if (crtc_state->fdi_lanes > 2)
5175                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5176                 else
5177                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5178
5179                 break;
5180         case PIPE_C:
5181                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5182
5183                 break;
5184         default:
5185                 BUG();
5186         }
5187 }
5188
5189 /*
5190  * Finds the encoder associated with the given CRTC. This can only be
5191  * used when we know that the CRTC isn't feeding multiple encoders!
5192  */
5193 static struct intel_encoder *
5194 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5195                            const struct intel_crtc_state *crtc_state)
5196 {
5197         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5198         const struct drm_connector_state *connector_state;
5199         const struct drm_connector *connector;
5200         struct intel_encoder *encoder = NULL;
5201         int num_encoders = 0;
5202         int i;
5203
5204         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5205                 if (connector_state->crtc != &crtc->base)
5206                         continue;
5207
5208                 encoder = to_intel_encoder(connector_state->best_encoder);
5209                 num_encoders++;
5210         }
5211
5212         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5213              num_encoders, pipe_name(crtc->pipe));
5214
5215         return encoder;
5216 }
5217
5218 /*
5219  * Enable PCH resources required for PCH ports:
5220  *   - PCH PLLs
5221  *   - FDI training & RX/TX
5222  *   - update transcoder timings
5223  *   - DP transcoding bits
5224  *   - transcoder
5225  */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
                                const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 temp;

        assert_pch_transcoder_disabled(dev_priv, pipe);

        if (IS_IVYBRIDGE(dev_priv))
                ivybridge_update_fdi_bc_bifurcation(crtc_state);

        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
        I915_WRITE(FDI_RX_TUSIZE1(pipe),
                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc, crtc_state);

        /* We need to program the right clock selection before writing the pixel
         * multiplier into the DPLL. */
        if (HAS_PCH_CPT(dev_priv)) {
                u32 sel;

                temp = I915_READ(PCH_DPLL_SEL);
                temp |= TRANS_DPLL_ENABLE(pipe);
                sel = TRANS_DPLLB_SEL(pipe);
                if (crtc_state->shared_dpll ==
                    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
                        temp |= sel;
                else
                        temp &= ~sel;
                I915_WRITE(PCH_DPLL_SEL, temp);
        }

        /* XXX: pch pll's can be enabled any time before we enable the PCH
         * transcoder, and we actually should do this to not upset any PCH
         * transcoder that already use the clock when we share it.
         *
         * Note that enable_shared_dpll tries to do the right thing, but
         * get_shared_dpll unconditionally resets the pll - we need that to have
         * the right LVDS enable sequence. */
        intel_enable_shared_dpll(crtc_state);

        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
        ironlake_pch_transcoder_set_timings(crtc_state, pipe);

        intel_fdi_normal_train(crtc);

        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev_priv) &&
            intel_crtc_has_dp_encoder(crtc_state)) {
                const struct drm_display_mode *adjusted_mode =
                        &crtc_state->base.adjusted_mode;
                /* PIPECONF BPC field lives at bits 7:5 */
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                i915_reg_t reg = TRANS_DP_CTL(pipe);
                enum port port;

                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
                          TRANS_DP_SYNC_MASK |
                          TRANS_DP_BPC_MASK);
                temp |= TRANS_DP_OUTPUT_ENABLE;
                temp |= bpc << 9; /* same format but at 11:9 */

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

                port = intel_get_crtc_new_encoder(state, crtc_state)->port;
                WARN_ON(port < PORT_B || port > PORT_D);
                temp |= TRANS_DP_PORT_SEL(port);

                I915_WRITE(reg, temp);
        }

        ironlake_enable_pch_transcoder(crtc_state);
}
5309
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the
 * timings, and enable the PCH transcoder. Always operates on PCH
 * transcoder A (asserted disabled first).
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
                           const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        assert_pch_transcoder_disabled(dev_priv, PIPE_A);

        lpt_program_iclkip(crtc_state);

        /* Set transcoder timing. */
        ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5326
/*
 * Sanity-check a CPT modeset by watching the pipe's scanline counter
 * (PIPEDSL): if it doesn't change within the wait windows, the pipe is
 * stuck and we log an error.
 */
static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t dslreg = PIPEDSL(pipe);
        u32 temp;

        temp = I915_READ(dslreg);
        udelay(500);
        if (wait_for(I915_READ(dslreg) != temp, 5)) {
                /* retry once before declaring the pipe stuck */
                if (wait_for(I915_READ(dslreg) != temp, 5))
                        DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
        }
}
5340
5341 /*
5342  * The hardware phase 0.0 refers to the center of the pixel.
5343  * We want to start from the top/left edge which is phase
5344  * -0.5. That matches how the hardware calculates the scaling
5345  * factors (from top-left of the first pixel to bottom-right
5346  * of the last pixel, as opposed to the pixel centers).
5347  *
5348  * For 4:2:0 subsampled chroma planes we obviously have to
5349  * adjust that so that the chroma sample position lands in
5350  * the right spot.
5351  *
5352  * Note that for packed YCbCr 4:2:2 formats there is no way to
5353  * control chroma siting. The hardware simply replicates the
5354  * chroma samples for both of the luma samples, and thus we don't
5355  * actually get the expected MPEG2 chroma siting convention :(
5356  * The same behaviour is observed on pre-SKL platforms as well.
5357  *
5358  * Theory behind the formula (note that we ignore sub-pixel
5359  * source coordinates):
5360  * s = source sample position
5361  * d = destination sample position
5362  *
5363  * Downscaling 4:1:
5364  * -0.5
5365  * | 0.0
5366  * | |     1.5 (initial phase)
5367  * | |     |
5368  * v v     v
5369  * | s | s | s | s |
5370  * |       d       |
5371  *
5372  * Upscaling 1:4:
5373  * -0.5
5374  * | -0.375 (initial phase)
5375  * | |     0.0
5376  * | |     |
5377  * v v     v
5378  * |       s       |
5379  * | d | d | d | d |
5380  */
5381 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5382 {
5383         int phase = -0x8000;
5384         u16 trip = 0;
5385
5386         if (chroma_cosited)
5387                 phase += (sub - 1) * 0x8000 / sub;
5388
5389         phase += scale / (2 * sub);
5390
5391         /*
5392          * Hardware initial phase limited to [-0.5:1.5].
5393          * Since the max hardware scale factor is 3.0, we
5394          * should never actually excdeed 1.0 here.
5395          */
5396         WARN_ON(phase < -0x8000 || phase > 0x18000);
5397
5398         if (phase < 0)
5399                 phase = 0x10000 + phase;
5400         else
5401                 trip = PS_PHASE_TRIP;
5402
5403         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5404 }
5405
/* Pipe/plane scaler source and destination size limits. */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
5420
/*
 * skl_update_scaler - stage allocation or release of a scaler in @crtc_state.
 * @crtc_state: crtc state whose scaler bookkeeping is updated
 * @force_detach: release the scaler regardless of @need_scaler
 * @scaler_user: index identifying the user (plane index or SKL_CRTC_INDEX)
 * @scaler_id: in/out, scaler currently assigned to this user (-1 if none)
 * @src_w: source width in pixels
 * @src_h: source height in pixels
 * @dst_w: destination width in pixels
 * @dst_h: destination height in pixels
 * @format: source format, or NULL for the pipe (panel fitter) user
 * @need_scaler: caller already determined that a scaler is required
 *
 * Only software state in @crtc_state is touched here; the actual scaler
 * registers are programmed later during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL if the requested scaling cannot be
 * supported by the hardware.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  const struct drm_format_info *format, bool need_scaler)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        if (src_w != dst_w || src_h != dst_h)
                need_scaler = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
            need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * If the plane is being disabled, the scaler is no longer required,
         * or detach is forced:
         *  - free the scaler bound to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaler) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* Semiplanar YUV sources have a larger minimum size. */
        if (format && drm_format_info_is_yuv_semiplanar(format) &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (INTEL_GEN(dev_priv) >= 11 &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (INTEL_GEN(dev_priv) < 11 &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                scaler_state->scaler_users);

        return 0;
}
5509
5510 /**
5511  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5512  *
5513  * @state: crtc's scaler state
5514  *
5515  * Return
5516  *     0 - scaler_usage updated successfully
5517  *    error - requested scaling cannot be supported or other error condition
5518  */
5519 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5520 {
5521         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5522         bool need_scaler = false;
5523
5524         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5525                 need_scaler = true;
5526
5527         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5528                                  &state->scaler_state.scaler_id,
5529                                  state->pipe_src_w, state->pipe_src_h,
5530                                  adjusted_mode->crtc_hdisplay,
5531                                  adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5532 }
5533
5534 /**
5535  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5536  * @crtc_state: crtc's scaler state
5537  * @plane_state: atomic plane state to update
5538  *
5539  * Return
5540  *     0 - scaler_usage updated successfully
5541  *    error - requested scaling cannot be supported or other error condition
5542  */
5543 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5544                                    struct intel_plane_state *plane_state)
5545 {
5546         struct intel_plane *intel_plane =
5547                 to_intel_plane(plane_state->base.plane);
5548         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5549         struct drm_framebuffer *fb = plane_state->base.fb;
5550         int ret;
5551         bool force_detach = !fb || !plane_state->base.visible;
5552         bool need_scaler = false;
5553
5554         /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5555         if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5556             fb && drm_format_info_is_yuv_semiplanar(fb->format))
5557                 need_scaler = true;
5558
5559         ret = skl_update_scaler(crtc_state, force_detach,
5560                                 drm_plane_index(&intel_plane->base),
5561                                 &plane_state->scaler_id,
5562                                 drm_rect_width(&plane_state->base.src) >> 16,
5563                                 drm_rect_height(&plane_state->base.src) >> 16,
5564                                 drm_rect_width(&plane_state->base.dst),
5565                                 drm_rect_height(&plane_state->base.dst),
5566                                 fb ? fb->format : NULL, need_scaler);
5567
5568         if (ret || plane_state->scaler_id < 0)
5569                 return ret;
5570
5571         /* check colorkey */
5572         if (plane_state->ckey.flags) {
5573                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5574                               intel_plane->base.base.id,
5575                               intel_plane->base.name);
5576                 return -EINVAL;
5577         }
5578
5579         /* Check src format */
5580         switch (fb->format->format) {
5581         case DRM_FORMAT_RGB565:
5582         case DRM_FORMAT_XBGR8888:
5583         case DRM_FORMAT_XRGB8888:
5584         case DRM_FORMAT_ABGR8888:
5585         case DRM_FORMAT_ARGB8888:
5586         case DRM_FORMAT_XRGB2101010:
5587         case DRM_FORMAT_XBGR2101010:
5588         case DRM_FORMAT_XBGR16161616F:
5589         case DRM_FORMAT_ABGR16161616F:
5590         case DRM_FORMAT_XRGB16161616F:
5591         case DRM_FORMAT_ARGB16161616F:
5592         case DRM_FORMAT_YUYV:
5593         case DRM_FORMAT_YVYU:
5594         case DRM_FORMAT_UYVY:
5595         case DRM_FORMAT_VYUY:
5596         case DRM_FORMAT_NV12:
5597         case DRM_FORMAT_P010:
5598         case DRM_FORMAT_P012:
5599         case DRM_FORMAT_P016:
5600         case DRM_FORMAT_Y210:
5601         case DRM_FORMAT_Y212:
5602         case DRM_FORMAT_Y216:
5603         case DRM_FORMAT_XVYU2101010:
5604         case DRM_FORMAT_XVYU12_16161616:
5605         case DRM_FORMAT_XVYU16161616:
5606                 break;
5607         default:
5608                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5609                               intel_plane->base.base.id, intel_plane->base.name,
5610                               fb->base.id, fb->format->format);
5611                 return -EINVAL;
5612         }
5613
5614         return 0;
5615 }
5616
5617 static void skylake_scaler_disable(struct intel_crtc *crtc)
5618 {
5619         int i;
5620
5621         for (i = 0; i < crtc->num_scalers; i++)
5622                 skl_detach_scaler(crtc, i);
5623 }
5624
/*
 * skylake_pfit_enable - program a pipe scaler as the panel fitter.
 * @crtc_state: state of the crtc being programmed
 *
 * Uses the scaler previously reserved in crtc_state->scaler_state
 * (scaler_id). No-op when pch_pfit is disabled.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;

        if (crtc_state->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int pfit_w, pfit_h, hscale, vscale;
                int id;

                /* A scaler must have been reserved during atomic check. */
                if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
                        return;

                /* pch_pfit.size packs width in the high 16 bits, height in the low. */
                pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
                pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

                /* .16 fixed-point source:destination ratios. */
                hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
                vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

                uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                /* NOTE(review): _FW (no forcewake bookkeeping) writes mixed
                 * with plain I915_WRITE here — confirm this is intentional. */
                I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
        }
}
5661
5662 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5663 {
5664         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5665         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5666         enum pipe pipe = crtc->pipe;
5667
5668         if (crtc_state->pch_pfit.enabled) {
5669                 /* Force use of hard-coded filter coefficients
5670                  * as some pre-programmed values are broken,
5671                  * e.g. x201.
5672                  */
5673                 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5674                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5675                                                  PF_PIPE_SEL_IVB(pipe));
5676                 else
5677                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5678                 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5679                 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5680         }
5681 }
5682
/*
 * hsw_enable_ips - enable IPS if @crtc_state asks for it.
 * @crtc_state: state of the crtc IPS is being enabled on
 *
 * BDW enables IPS through the pcode mailbox, HSW by writing IPS_CTL
 * directly. Must run after a plane enable + vblank (see below).
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                /* Quoting Art Runyan: "it's not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
5718
/*
 * hsw_disable_ips - disable IPS if @crtc_state had it enabled.
 * @crtc_state: old state of the crtc IPS is being disabled on
 *
 * Mirrors hsw_enable_ips(): pcode mailbox on BDW, IPS_CTL on HSW.
 * Ends with a vblank wait so a subsequent plane disable is safe.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5745
5746 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5747 {
5748         if (intel_crtc->overlay) {
5749                 struct drm_device *dev = intel_crtc->base.dev;
5750
5751                 mutex_lock(&dev->struct_mutex);
5752                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5753                 mutex_unlock(&dev->struct_mutex);
5754         }
5755
5756         /* Let userspace switch the overlay on again. In most cases userspace
5757          * has to recompute where to put it anyway.
5758          */
5759 }
5760
5761 /**
5762  * intel_post_enable_primary - Perform operations after enabling primary plane
5763  * @crtc: the CRTC whose primary plane was just enabled
5764  * @new_crtc_state: the enabling state
5765  *
5766  * Performs potentially sleeping operations that must be done after the primary
5767  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5768  * called due to an explicit primary plane update, or due to an implicit
5769  * re-enable that is caused when a sprite plane is updated to no longer
5770  * completely hide the primary plane.
5771  */
5772 static void
5773 intel_post_enable_primary(struct drm_crtc *crtc,
5774                           const struct intel_crtc_state *new_crtc_state)
5775 {
5776         struct drm_device *dev = crtc->dev;
5777         struct drm_i915_private *dev_priv = to_i915(dev);
5778         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5779         enum pipe pipe = intel_crtc->pipe;
5780
5781         /*
5782          * Gen2 reports pipe underruns whenever all planes are disabled.
5783          * So don't enable underrun reporting before at least some planes
5784          * are enabled.
5785          * FIXME: Need to fix the logic to work when we turn off all planes
5786          * but leave the pipe running.
5787          */
5788         if (IS_GEN(dev_priv, 2))
5789                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5790
5791         /* Underruns don't always raise interrupts, so check manually. */
5792         intel_check_cpu_fifo_underruns(dev_priv);
5793         intel_check_pch_fifo_underruns(dev_priv);
5794 }
5795
/* FIXME get rid of this and use pre_plane_update */
/*
 * Prepare to turn off the primary plane outside of a full atomic commit:
 * quiesce gen2 underrun reporting, disable IPS, and get memory
 * self-refresh out of the way so the plane disable actually latches.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        hsw_disable_ips(to_intel_crtc_state(crtc->state));

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
}
5827
5828 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5829                                        const struct intel_crtc_state *new_crtc_state)
5830 {
5831         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5832         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5833
5834         if (!old_crtc_state->ips_enabled)
5835                 return false;
5836
5837         if (needs_modeset(new_crtc_state))
5838                 return true;
5839
5840         /*
5841          * Workaround : Do not read or write the pipe palette/gamma data while
5842          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5843          *
5844          * Disable IPS before we program the LUT.
5845          */
5846         if (IS_HASWELL(dev_priv) &&
5847             (new_crtc_state->base.color_mgmt_changed ||
5848              new_crtc_state->update_pipe) &&
5849             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5850                 return true;
5851
5852         return !new_crtc_state->ips_enabled;
5853 }
5854
5855 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5856                                        const struct intel_crtc_state *new_crtc_state)
5857 {
5858         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5859         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5860
5861         if (!new_crtc_state->ips_enabled)
5862                 return false;
5863
5864         if (needs_modeset(new_crtc_state))
5865                 return true;
5866
5867         /*
5868          * Workaround : Do not read or write the pipe palette/gamma data while
5869          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5870          *
5871          * Re-enable IPS after the LUT has been programmed.
5872          */
5873         if (IS_HASWELL(dev_priv) &&
5874             (new_crtc_state->base.color_mgmt_changed ||
5875              new_crtc_state->update_pipe) &&
5876             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5877                 return true;
5878
5879         /*
5880          * We can't read out IPS on broadwell, assume the worst and
5881          * forcibly enable IPS on the first fastset.
5882          */
5883         if (new_crtc_state->update_pipe &&
5884             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5885                 return true;
5886
5887         return !old_crtc_state->ips_enabled;
5888 }
5889
5890 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5891                           const struct intel_crtc_state *crtc_state)
5892 {
5893         if (!crtc_state->nv12_planes)
5894                 return false;
5895
5896         /* WA Display #0827: Gen9:all */
5897         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5898                 return true;
5899
5900         return false;
5901 }
5902
5903 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5904                                const struct intel_crtc_state *crtc_state)
5905 {
5906         /* Wa_2006604312:icl */
5907         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5908                 return true;
5909
5910         return false;
5911 }
5912
/*
 * intel_post_plane_update - finish a plane update on a crtc.
 * @old_crtc_state: crtc state before the update (the new state is looked
 *      up from the atomic state)
 *
 * Flushes frontbuffer tracking, programs the optimal watermarks,
 * re-enables IPS/FBC where allowed and clears display workarounds that
 * are no longer needed.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = old_crtc_state->base.state;
        struct intel_crtc_state *pipe_config =
                intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
                                                crtc);
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(state, primary);

        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

        if (pipe_config->update_wm_post && pipe_config->base.active)
                intel_update_watermarks(crtc);

        if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
                hsw_enable_ips(pipe_config);

        if (old_primary_state) {
                struct drm_plane_state *new_primary_state =
                        drm_atomic_get_new_plane_state(state, primary);

                intel_fbc_post_update(crtc);

                /* Primary became visible: run the post-enable work. */
                if (new_primary_state->visible &&
                    (needs_modeset(pipe_config) ||
                     !old_primary_state->visible))
                        intel_post_enable_primary(&crtc->base, pipe_config);
        }

        /* Display WA #0827: disable once the new state no longer needs it. */
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config))
                skl_wa_827(dev_priv, crtc->pipe, false);

        /* Wa_2006604312:icl - likewise. */
        if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
            !needs_scalerclk_wa(dev_priv, pipe_config))
                icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
}
5954
/*
 * intel_pre_plane_update - prepare a crtc for an upcoming plane update.
 * @old_crtc_state: state of the crtc before the update
 * @pipe_config: new crtc state being committed
 *
 * Runs before the planes are (re)programmed: disables IPS / gen2 underrun
 * reporting / self-refresh where needed, applies display workarounds, and
 * programs the intermediate watermarks for non-modeset updates.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
                                   struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = old_crtc_state->base.state;
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(state, primary);
        bool modeset = needs_modeset(pipe_config);
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(state);

        if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
                hsw_disable_ips(old_crtc_state);

        if (old_primary_state) {
                struct intel_plane_state *new_primary_state =
                        intel_atomic_get_new_plane_state(intel_state,
                                                         to_intel_plane(primary));

                intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So disable underrun reporting before all the planes get disabled.
                 */
                if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
                    (modeset || !new_primary_state->base.visible))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
        }

        /* Display WA 827 */
        if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
            needs_nv12_wa(dev_priv, pipe_config))
                skl_wa_827(dev_priv, crtc->pipe, true);

        /* Wa_2006604312:icl */
        if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
            needs_scalerclk_wa(dev_priv, pipe_config))
                icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
            pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
            old_crtc_state->base.active)
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * If we're doing a modeset, we're done.  No need to do any pre-vblank
         * watermark programming here.
         */
        if (needs_modeset(pipe_config))
                return;

        /*
         * For platforms that support atomic watermarks, program the
         * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
         * will be the intermediate values that are safe for both pre- and
         * post- vblank; when vblank happens, the 'active' values will be set
         * to the final 'target' values and we'll do this again to get the
         * optimal watermarks.  For gen9+ platforms, the values we program here
         * will be the final target values which will get automatically latched
         * at vblank time; no further programming will be necessary.
         *
         * If a platform hasn't been transitioned to atomic watermarks yet,
         * we'll continue to update watermarks the old way, if flags tell
         * us to.
         */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(intel_state,
                                                     pipe_config);
        else if (pipe_config->update_wm_pre)
                intel_update_watermarks(crtc);
}
6048
/*
 * intel_crtc_disable_planes - disable all planes selected by the crtc's
 * update_planes mask.
 * @state: the atomic state being committed
 * @crtc: the crtc whose planes are being disabled
 *
 * Also turns off the legacy overlay and flushes frontbuffer tracking for
 * every plane that was visible before.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
                                      struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        unsigned int update_mask = new_crtc_state->update_planes;
        const struct intel_plane_state *old_plane_state;
        struct intel_plane *plane;
        unsigned fb_bits = 0;
        int i;

        intel_crtc_dpms_overlay_disable(crtc);

        for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
                /* Skip planes on other pipes or not part of this update. */
                if (crtc->pipe != plane->pipe ||
                    !(update_mask & BIT(plane->id)))
                        continue;

                intel_disable_plane(plane, new_crtc_state);

                if (old_plane_state->base.visible)
                        fb_bits |= plane->frontbuffer_bit;
        }

        intel_frontbuffer_flip(dev_priv, fb_bits);
}
6076
6077 /*
6078  * intel_connector_primary_encoder - get the primary encoder for a connector
6079  * @connector: connector for which to return the encoder
6080  *
6081  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6082  * all connectors to their encoder, except for DP-MST connectors which have
6083  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6084  * pointed to by as many DP-MST connectors as there are pipes.
6085  */
6086 static struct intel_encoder *
6087 intel_connector_primary_encoder(struct intel_connector *connector)
6088 {
6089         struct intel_encoder *encoder;
6090
6091         if (connector->mst_port)
6092                 return &dp_to_dig_port(connector->mst_port)->base;
6093
6094         encoder = intel_attached_encoder(&connector->base);
6095         WARN_ON(!encoder);
6096
6097         return encoder;
6098 }
6099
6100 static bool
6101 intel_connector_needs_modeset(struct intel_atomic_state *state,
6102                               const struct drm_connector_state *old_conn_state,
6103                               const struct drm_connector_state *new_conn_state)
6104 {
6105         struct intel_crtc *old_crtc = old_conn_state->crtc ?
6106                                       to_intel_crtc(old_conn_state->crtc) : NULL;
6107         struct intel_crtc *new_crtc = new_conn_state->crtc ?
6108                                       to_intel_crtc(new_conn_state->crtc) : NULL;
6109
6110         return new_crtc != old_crtc ||
6111                (new_crtc &&
6112                 needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
6113 }
6114
/*
 * intel_encoders_update_prepare - call the ->update_prepare() hook of the
 * primary encoder of every connector undergoing a modeset in @state.
 * @state: the atomic state being committed
 *
 * The hook receives the connector's new crtc, or NULL when the connector
 * is being disabled. Encoders without the hook are skipped.
 */
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
        struct drm_connector_state *old_conn_state;
        struct drm_connector_state *new_conn_state;
        struct drm_connector *conn;
        int i;

        for_each_oldnew_connector_in_state(&state->base, conn,
                                           old_conn_state, new_conn_state, i) {
                struct intel_encoder *encoder;
                struct intel_crtc *crtc;

                if (!intel_connector_needs_modeset(state,
                                                   old_conn_state,
                                                   new_conn_state))
                        continue;

                encoder = intel_connector_primary_encoder(to_intel_connector(conn));
                if (!encoder->update_prepare)
                        continue;

                crtc = new_conn_state->crtc ?
                        to_intel_crtc(new_conn_state->crtc) : NULL;
                encoder->update_prepare(state, encoder, crtc);
        }
}
6141
6142 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6143 {
6144         struct drm_connector_state *old_conn_state;
6145         struct drm_connector_state *new_conn_state;
6146         struct drm_connector *conn;
6147         int i;
6148
6149         for_each_oldnew_connector_in_state(&state->base, conn,
6150                                            old_conn_state, new_conn_state, i) {
6151                 struct intel_encoder *encoder;
6152                 struct intel_crtc *crtc;
6153
6154                 if (!intel_connector_needs_modeset(state,
6155                                                    old_conn_state,
6156                                                    new_conn_state))
6157                         continue;
6158
6159                 encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6160                 if (!encoder->update_complete)
6161                         continue;
6162
6163                 crtc = new_conn_state->crtc ?
6164                         to_intel_crtc(new_conn_state->crtc) : NULL;
6165                 encoder->update_complete(state, encoder, crtc);
6166         }
6167 }
6168
6169 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
6170                                           struct intel_crtc_state *crtc_state,
6171                                           struct intel_atomic_state *state)
6172 {
6173         struct drm_connector_state *conn_state;
6174         struct drm_connector *conn;
6175         int i;
6176
6177         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6178                 struct intel_encoder *encoder =
6179                         to_intel_encoder(conn_state->best_encoder);
6180
6181                 if (conn_state->crtc != &crtc->base)
6182                         continue;
6183
6184                 if (encoder->pre_pll_enable)
6185                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6186         }
6187 }
6188
6189 static void intel_encoders_pre_enable(struct intel_crtc *crtc,
6190                                       struct intel_crtc_state *crtc_state,
6191                                       struct intel_atomic_state *state)
6192 {
6193         struct drm_connector_state *conn_state;
6194         struct drm_connector *conn;
6195         int i;
6196
6197         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6198                 struct intel_encoder *encoder =
6199                         to_intel_encoder(conn_state->best_encoder);
6200
6201                 if (conn_state->crtc != &crtc->base)
6202                         continue;
6203
6204                 if (encoder->pre_enable)
6205                         encoder->pre_enable(encoder, crtc_state, conn_state);
6206         }
6207 }
6208
6209 static void intel_encoders_enable(struct intel_crtc *crtc,
6210                                   struct intel_crtc_state *crtc_state,
6211                                   struct intel_atomic_state *state)
6212 {
6213         struct drm_connector_state *conn_state;
6214         struct drm_connector *conn;
6215         int i;
6216
6217         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6218                 struct intel_encoder *encoder =
6219                         to_intel_encoder(conn_state->best_encoder);
6220
6221                 if (conn_state->crtc != &crtc->base)
6222                         continue;
6223
6224                 if (encoder->enable)
6225                         encoder->enable(encoder, crtc_state, conn_state);
6226                 intel_opregion_notify_encoder(encoder, true);
6227         }
6228 }
6229
6230 static void intel_encoders_disable(struct intel_crtc *crtc,
6231                                    struct intel_crtc_state *old_crtc_state,
6232                                    struct intel_atomic_state *state)
6233 {
6234         struct drm_connector_state *old_conn_state;
6235         struct drm_connector *conn;
6236         int i;
6237
6238         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6239                 struct intel_encoder *encoder =
6240                         to_intel_encoder(old_conn_state->best_encoder);
6241
6242                 if (old_conn_state->crtc != &crtc->base)
6243                         continue;
6244
6245                 intel_opregion_notify_encoder(encoder, false);
6246                 if (encoder->disable)
6247                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6248         }
6249 }
6250
6251 static void intel_encoders_post_disable(struct intel_crtc *crtc,
6252                                         struct intel_crtc_state *old_crtc_state,
6253                                         struct intel_atomic_state *state)
6254 {
6255         struct drm_connector_state *old_conn_state;
6256         struct drm_connector *conn;
6257         int i;
6258
6259         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6260                 struct intel_encoder *encoder =
6261                         to_intel_encoder(old_conn_state->best_encoder);
6262
6263                 if (old_conn_state->crtc != &crtc->base)
6264                         continue;
6265
6266                 if (encoder->post_disable)
6267                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6268         }
6269 }
6270
6271 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
6272                                             struct intel_crtc_state *old_crtc_state,
6273                                             struct intel_atomic_state *state)
6274 {
6275         struct drm_connector_state *old_conn_state;
6276         struct drm_connector *conn;
6277         int i;
6278
6279         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6280                 struct intel_encoder *encoder =
6281                         to_intel_encoder(old_conn_state->best_encoder);
6282
6283                 if (old_conn_state->crtc != &crtc->base)
6284                         continue;
6285
6286                 if (encoder->post_pll_disable)
6287                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6288         }
6289 }
6290
6291 static void intel_encoders_update_pipe(struct intel_crtc *crtc,
6292                                        struct intel_crtc_state *crtc_state,
6293                                        struct intel_atomic_state *state)
6294 {
6295         struct drm_connector_state *conn_state;
6296         struct drm_connector *conn;
6297         int i;
6298
6299         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6300                 struct intel_encoder *encoder =
6301                         to_intel_encoder(conn_state->best_encoder);
6302
6303                 if (conn_state->crtc != &crtc->base)
6304                         continue;
6305
6306                 if (encoder->update_pipe)
6307                         encoder->update_pipe(encoder, crtc_state, conn_state);
6308         }
6309 }
6310
6311 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6312 {
6313         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6314         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6315
6316         plane->disable_plane(plane, crtc_state);
6317 }
6318
/*
 * Enable a pipe on ILK-class hardware, including the FDI link and PCH
 * transcoder when a PCH port is driven.  The step ordering below is
 * load-bearing (see the inline comments); do not reorder casually.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /* Enabling an already-active pipe indicates a state tracking bug. */
        if (WARN_ON(intel_crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (pipe_config->has_pch_encoder)
                intel_prepare_shared_dpll(pipe_config);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        intel_set_pipe_timings(pipe_config);
        intel_set_pipe_src_size(pipe_config);

        /* FDI link M/N values are only relevant when driving a PCH port. */
        if (pipe_config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        ironlake_set_pipeconf(pipe_config);

        intel_crtc->active = true;

        intel_encoders_pre_enable(intel_crtc, pipe_config, state);

        if (pipe_config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ironlake_fdi_pll_enable(pipe_config);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(pipe_config);

        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);
        intel_enable_pipe(pipe_config);

        if (pipe_config->has_pch_encoder)
                ironlake_pch_enable(state, pipe_config);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(intel_crtc, pipe_config, state);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev, intel_crtc->pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (pipe_config->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
        /* Underruns from this point on are genuine; re-arm the reporting. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6413
6414 /* IPS only exists on ULT machines and is tied to pipe A. */
6415 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6416 {
6417         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6418 }
6419
6420 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6421                                             enum pipe pipe, bool apply)
6422 {
6423         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6424         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6425
6426         if (apply)
6427                 val |= mask;
6428         else
6429                 val &= ~mask;
6430
6431         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6432 }
6433
6434 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6435 {
6436         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6437         enum pipe pipe = crtc->pipe;
6438         u32 val;
6439
6440         val = MBUS_DBOX_A_CREDIT(2);
6441
6442         if (INTEL_GEN(dev_priv) >= 12) {
6443                 val |= MBUS_DBOX_BW_CREDIT(2);
6444                 val |= MBUS_DBOX_B_CREDIT(12);
6445         } else {
6446                 val |= MBUS_DBOX_BW_CREDIT(1);
6447                 val |= MBUS_DBOX_B_CREDIT(8);
6448         }
6449
6450         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6451 }
6452
/*
 * Enable a pipe on HSW+ (DDI-based) hardware.  Unlike the ILK path,
 * the shared DPLL is enabled here between the pre_pll_enable and
 * pre_enable encoder hooks, and DSI transcoders skip the timing/
 * pipeconf/pipe-enable programming (handled by the DSI code).  The
 * step ordering is load-bearing; do not reorder casually.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                                struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        bool psl_clkgate_wa;

        /* Enabling an already-active pipe indicates a state tracking bug. */
        if (WARN_ON(intel_crtc->active))
                return;

        intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);

        if (pipe_config->shared_dpll)
                intel_enable_shared_dpll(pipe_config);

        intel_encoders_pre_enable(intel_crtc, pipe_config, state);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(pipe_config);

        intel_set_pipe_src_size(pipe_config);

        /* PIPE_MULT does not exist for the eDP or DSI transcoders. */
        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder)) {
                I915_WRITE(PIPE_MULT(cpu_transcoder),
                           pipe_config->pixel_multiplier - 1);
        }

        if (pipe_config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        if (!transcoder_is_dsi(cpu_transcoder))
                haswell_set_pipeconf(pipe_config);

        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipemisc(pipe_config);

        intel_crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                         pipe_config->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (INTEL_GEN(dev_priv) >= 9)
                skylake_pfit_enable(pipe_config);
        else
                ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (INTEL_GEN(dev_priv) < 9)
                intel_disable_primary_plane(pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(intel_crtc);

        intel_ddi_set_pipe_settings(pipe_config);
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(pipe_config);

        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_pipe_mbus_enable(intel_crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(pipe_config);

        if (pipe_config->has_pch_encoder)
                lpt_pch_enable(state, pipe_config);

        if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(pipe_config, true);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(intel_crtc, pipe_config, state);

        /* WA above: undo the gating disable after the first vblank. */
        if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
        }
}
6562
6563 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6564 {
6565         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6566         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6567         enum pipe pipe = crtc->pipe;
6568
6569         /* To avoid upsetting the power well on haswell only disable the pfit if
6570          * it's in use. The hw state code will make sure we get this right. */
6571         if (old_crtc_state->pch_pfit.enabled) {
6572                 I915_WRITE(PF_CTL(pipe), 0);
6573                 I915_WRITE(PF_WIN_POS(pipe), 0);
6574                 I915_WRITE(PF_WIN_SZ(pipe), 0);
6575         }
6576 }
6577
/*
 * Disable a pipe on ILK-class hardware: encoder disable, pipe off,
 * pfit off, FDI off, encoder post_disable, then PCH transcoder and
 * FDI PLL teardown.  Ordering is load-bearing.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                  struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(intel_crtc, old_crtc_state, state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(old_crtc_state);

        ironlake_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ironlake_fdi_disable(crtc);

        intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

        if (old_crtc_state->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);

                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = I915_READ(reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        I915_WRITE(reg, temp);

                        /* disable DPLL_SEL */
                        temp = I915_READ(PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }

                ironlake_fdi_pll_disable(intel_crtc);
        }

        /* Teardown complete; underruns are once again reportable. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6636
/*
 * Disable a pipe on HSW+ (DDI-based) hardware: encoder disable, pipe
 * off, transcoder function off, DSC/scaler/pfit teardown, then the
 * post_disable and post_pll_disable encoder hooks.  DSI transcoders
 * skip the pipe/transcoder steps (handled by the DSI code).
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

        intel_encoders_disable(intel_crtc, old_crtc_state, state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(old_crtc_state);

        if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(old_crtc_state);

        intel_dsc_disable(old_crtc_state);

        /* Gen9+ uses the pipe scalers; older DDI platforms the PCH pfit. */
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
                ironlake_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

        intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
}
6671
/*
 * Program the GMCH panel fitter from @crtc_state, if it is in use.
 * Must be called while the pipe is still disabled (asserted below).
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* Nothing to program when the fitter is not used by this state. */
        if (!crtc_state->gmch_pfit.control)
                return;

        /*
         * The panel fitter should only be adjusted whilst the pipe is disabled,
         * according to register description and PRM.
         */
        WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
        assert_pipe_disabled(dev_priv, crtc->pipe);

        I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
        I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
6694
6695 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6696 {
6697         if (phy == PHY_NONE)
6698                 return false;
6699
6700         if (IS_ELKHARTLAKE(dev_priv))
6701                 return phy <= PHY_C;
6702
6703         if (INTEL_GEN(dev_priv) >= 11)
6704                 return phy <= PHY_B;
6705
6706         return false;
6707 }
6708
6709 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6710 {
6711         if (INTEL_GEN(dev_priv) >= 12)
6712                 return phy >= PHY_D && phy <= PHY_I;
6713
6714         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6715                 return phy >= PHY_C && phy <= PHY_F;
6716
6717         return false;
6718 }
6719
6720 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6721 {
6722         if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6723                 return PHY_A;
6724
6725         return (enum phy)port;
6726 }
6727
6728 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6729 {
6730         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6731                 return PORT_TC_NONE;
6732
6733         if (INTEL_GEN(dev_priv) >= 12)
6734                 return port - PORT_D;
6735
6736         return port - PORT_C;
6737 }
6738
/*
 * Map a DDI port to the power domain covering its lanes.  Unknown
 * ports fall back to POWER_DOMAIN_PORT_OTHER (with a MISSING_CASE
 * warning).
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
        switch (port) {
        case PORT_A:
                return POWER_DOMAIN_PORT_DDI_A_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_LANES;
        case PORT_C:
                return POWER_DOMAIN_PORT_DDI_C_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_LANES;
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_LANES;
        case PORT_F:
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        default:
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
}
6759
/*
 * Map @dig_port's AUX channel to the matching power domain.  Type-C
 * ports operating in Thunderbolt-alt mode use the dedicated *_TBT
 * domains; everything else uses the regular AUX domains.  Unknown
 * channels warn via MISSING_CASE and fall back to a safe default.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

        if (intel_phy_is_tc(dev_priv, phy) &&
            dig_port->tc_mode == TC_PORT_TBT_ALT) {
                switch (dig_port->aux_ch) {
                case AUX_CH_C:
                        return POWER_DOMAIN_AUX_C_TBT;
                case AUX_CH_D:
                        return POWER_DOMAIN_AUX_D_TBT;
                case AUX_CH_E:
                        return POWER_DOMAIN_AUX_E_TBT;
                case AUX_CH_F:
                        return POWER_DOMAIN_AUX_F_TBT;
                default:
                        MISSING_CASE(dig_port->aux_ch);
                        return POWER_DOMAIN_AUX_C_TBT;
                }
        }

        switch (dig_port->aux_ch) {
        case AUX_CH_A:
                return POWER_DOMAIN_AUX_A;
        case AUX_CH_B:
                return POWER_DOMAIN_AUX_B;
        case AUX_CH_C:
                return POWER_DOMAIN_AUX_C;
        case AUX_CH_D:
                return POWER_DOMAIN_AUX_D;
        case AUX_CH_E:
                return POWER_DOMAIN_AUX_E;
        case AUX_CH_F:
                return POWER_DOMAIN_AUX_F;
        default:
                MISSING_CASE(dig_port->aux_ch);
                return POWER_DOMAIN_AUX_A;
        }
}
6801
/*
 * Compute the bitmask of display power domains required by
 * @crtc_state: pipe, transcoder, panel fitter (when used or forced),
 * each attached encoder's domain, audio (on DDI) and the display core
 * when a shared DPLL is in use.  Returns 0 for an inactive crtc.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_encoder *encoder;
        enum pipe pipe = crtc->pipe;
        u64 mask;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (!crtc_state->base.active)
                return 0;

        mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
        mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
        if (crtc_state->pch_pfit.enabled ||
            crtc_state->pch_pfit.force_thru)
                mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

        /* Every encoder driving this crtc contributes its own domain. */
        drm_for_each_encoder_mask(encoder, &dev_priv->drm,
                                  crtc_state->base.encoder_mask) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                mask |= BIT_ULL(intel_encoder->power_domain);
        }

        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
                mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

        if (crtc_state->shared_dpll)
                mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

        return mask;
}
6835
6836 static u64
6837 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6838 {
6839         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6840         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6841         enum intel_display_power_domain domain;
6842         u64 domains, new_domains, old_domains;
6843
6844         old_domains = crtc->enabled_power_domains;
6845         crtc->enabled_power_domains = new_domains =
6846                 get_crtc_power_domains(crtc_state);
6847
6848         domains = new_domains & ~old_domains;
6849
6850         for_each_power_domain(domain, domains)
6851                 intel_display_power_get(dev_priv, domain);
6852
6853         return old_domains & ~new_domains;
6854 }
6855
6856 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6857                                       u64 domains)
6858 {
6859         enum intel_display_power_domain domain;
6860
6861         for_each_power_domain(domain, domains)
6862                 intel_display_power_put_unchecked(dev_priv, domain);
6863 }
6864
/*
 * Enable a pipe on VLV/CHV.  Note that here the DPLL is prepared and
 * enabled between the pre_pll_enable and pre_enable encoder hooks,
 * unlike the gmch i9xx path.  Step ordering is load-bearing.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
                                   struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /* Enabling an already-active pipe indicates a state tracking bug. */
        if (WARN_ON(intel_crtc->active))
                return;

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        intel_set_pipe_timings(pipe_config);
        intel_set_pipe_src_size(pipe_config);

        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                I915_WRITE(CHV_CANVAS(pipe), 0);
        }

        i9xx_set_pipeconf(pipe_config);

        intel_crtc->active = true;

        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);

        if (IS_CHERRYVIEW(dev_priv)) {
                chv_prepare_pll(intel_crtc, pipe_config);
                chv_enable_pll(intel_crtc, pipe_config);
        } else {
                vlv_prepare_pll(intel_crtc, pipe_config);
                vlv_enable_pll(intel_crtc, pipe_config);
        }

        intel_encoders_pre_enable(intel_crtc, pipe_config, state);

        i9xx_pfit_enable(pipe_config);

        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(pipe_config);

        /* NOTE(review): called without a NULL check, unlike the ILK path —
         * presumably initial_watermarks is always set on VLV/CHV; confirm. */
        dev_priv->display.initial_watermarks(state, pipe_config);
        intel_enable_pipe(pipe_config);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(intel_crtc, pipe_config, state);
}
6921
6922 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6923 {
6924         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6925         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6926
6927         I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6928         I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6929 }
6930
/*
 * Enable a pipe on pre-VLV GMCH platforms: program dividers, M/N, timings
 * and pipeconf, then enable the PLL, pfit and the pipe in order.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc means our state tracking is broken. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* No CPU FIFO underrun reporting on gen2. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(intel_crtc, pipe_config, state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(pipe_config);

	/* Not all platforms provide an initial_watermarks hook. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	/* vblank interrupts must be off until the pipe is running. */
	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(intel_crtc, pipe_config, state);
}
6981
/*
 * Turn off the GMCH panel fitter. The pipe must already be disabled,
 * since PFIT_CONTROL may only be changed while the pipe is off.
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do if the pfit was not in use on this crtc. */
	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
		      I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6996
/*
 * Disable a pipe on GMCH platforms: quiesce encoders and vblanks, turn off
 * the pipe, pfit and PLL, and update underrun reporting and watermarks.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct intel_atomic_state *state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(intel_crtc, old_crtc_state, state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

	/* DSI drives its own PLL; only shut down the DPLL for other outputs. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);

	/* No CPU FIFO underrun reporting on gen2. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	/* NOTE(review): i830 deliberately re-enables the pipe here rather
	 * than leaving it off — presumably the hardware needs a running
	 * pipe; confirm against i830_enable_pipe. */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7045
/*
 * Force a crtc off outside of a normal atomic commit, then scrub all the
 * software state (crtc/encoder links, watermarks, power domains, cdclk and
 * bandwidth bookkeeping) to match the now-disabled hardware.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off every plane that is still visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/* A throwaway atomic state just to drive the crtc_disable hook. */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Bring the software state in line with the disabled hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Release every power domain this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;

	bw_state->data_rate[intel_crtc->pipe] = 0;
	bw_state->num_active_planes[intel_crtc->pipe] = 0;
}
7120
7121 /*
7122  * turn all crtc's off, but do not adjust state
7123  * This has to be paired with a call to intel_modeset_setup_hw_state.
7124  */
7125 int intel_display_suspend(struct drm_device *dev)
7126 {
7127         struct drm_i915_private *dev_priv = to_i915(dev);
7128         struct drm_atomic_state *state;
7129         int ret;
7130
7131         state = drm_atomic_helper_suspend(dev);
7132         ret = PTR_ERR_OR_ZERO(state);
7133         if (ret)
7134                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
7135         else
7136                 dev_priv->modeset_restore_state = state;
7137         return ret;
7138 }
7139
/*
 * Default encoder destroy hook: tear down the drm encoder and free the
 * containing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
7147
/*
 * Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency): a connector that the hardware reports enabled
 * must have a matching active crtc/encoder in the atomic state, and vice
 * versa. Mismatches trigger I915_STATE_WARN.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->base.active,
		      "connector is active, but attached crtc isn't\n");

		/* DP MST connectors share encoders; skip the 1:1 checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->base.active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7186
7187 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7188 {
7189         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7190                 return crtc_state->fdi_lanes;
7191
7192         return 0;
7193 }
7194
/*
 * Validate the requested FDI lane count for a pipe against platform limits
 * and the lanes already claimed by sibling pipes (the 3-pipe IVB topology
 * shares FDI bandwidth between pipes B and C). Returns 0 if the config is
 * usable, -EINVAL if not, or a PTR_ERR from acquiring a sibling's state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum on any platform. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW cap at 2 lanes regardless of pipe. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* 2-pipe parts have no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B only works while pipe C uses no FDI lanes. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C can only get lanes if pipe B is using at most 2. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7266
#define RETRY 1
/*
 * Compute the FDI lane count and link M/N for a PCH-attached pipe. If the
 * link cannot carry the mode, reduce pipe bpp one bpc at a time (down to
 * 6 bpc) and retry. Returns 0 on success, RETRY when the caller must
 * recompute with the reduced bpp, or a negative error code.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* Deadlock must be propagated so the caller can back off locks. */
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		/* Drop one bpc (2 bits x 3 channels) and try again. */
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7315
7316 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7317 {
7318         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7319         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7320
7321         /* IPS only exists on ULT machines and is tied to pipe A. */
7322         if (!hsw_crtc_supports_ips(crtc))
7323                 return false;
7324
7325         if (!i915_modparams.enable_ips)
7326                 return false;
7327
7328         if (crtc_state->pipe_bpp > 24)
7329                 return false;
7330
7331         /*
7332          * We compare against max which means we must take
7333          * the increased cdclk requirement into account when
7334          * calculating the new cdclk.
7335          *
7336          * Should measure whether using a lower cdclk w/o IPS
7337          */
7338         if (IS_BROADWELL(dev_priv) &&
7339             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7340                 return false;
7341
7342         return true;
7343 }
7344
/*
 * Decide whether IPS should actually be enabled for this crtc state,
 * applying the dynamic constraints on top of hsw_crtc_state_ips_capable().
 */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return false;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return false;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return false;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
		return false;

	return true;
}
7375
7376 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7377 {
7378         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7379
7380         /* GDG double wide on either pipe, otherwise pipe A only */
7381         return INTEL_GEN(dev_priv) < 4 &&
7382                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7383 }
7384
/*
 * Compute the effective pipe pixel rate (kHz) for ILK+ pipes. When the PCH
 * panel fitter downscales, the pipe must fetch source pixels faster than
 * the mode clock, so the rate is scaled up by the src/dst area ratio
 * (clamped so the ratio never drops below 1 when upscaling).
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	u32 pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (pipe_config->pch_pfit.enabled) {
		u64 pipe_w, pipe_h, pfit_w, pfit_h;
		u32 pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		/* pfit.size packs the destination as width[31:16] height[15:0]. */
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		/* Upscaling never reduces the rate: clamp src to at least dst. */
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		/* A zero pfit dimension would divide by zero below. */
		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		/* 64-bit multiply first to avoid overflowing the product. */
		pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
7419
7420 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7421 {
7422         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
7423
7424         if (HAS_GMCH(dev_priv))
7425                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7426                 crtc_state->pixel_rate =
7427                         crtc_state->base.adjusted_mode.crtc_clock;
7428         else
7429                 crtc_state->pixel_rate =
7430                         ilk_pipe_pixel_rate(crtc_state);
7431 }
7432
/*
 * Validate and refine the crtc configuration: enforce dotclock limits
 * (enabling double wide mode where supported), reject impossible
 * output-format / source-width combinations, compute the pixel rate and,
 * for PCH encoders, the FDI configuration. Returns 0 or a negative errno.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
7506
7507 static void
7508 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7509 {
7510         while (*num > DATA_LINK_M_N_MASK ||
7511                *den > DATA_LINK_M_N_MASK) {
7512                 *num >>= 1;
7513                 *den >>= 1;
7514         }
7515 }
7516
7517 static void compute_m_n(unsigned int m, unsigned int n,
7518                         u32 *ret_m, u32 *ret_n,
7519                         bool constant_n)
7520 {
7521         /*
7522          * Several DP dongles in particular seem to be fussy about
7523          * too large link M/N values. Give N value as 0x8000 that
7524          * should be acceptable by specific devices. 0x8000 is the
7525          * specified fixed N value for asynchronous clock mode,
7526          * which the devices expect also in synchronous clock mode.
7527          */
7528         if (constant_n)
7529                 *ret_n = 0x8000;
7530         else
7531                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7532
7533         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7534         intel_reduce_m_n_ratio(ret_m, ret_n);
7535 }
7536
/*
 * Compute the data (gmch) and link M/N values for a link from the pipe bpp,
 * lane count, pixel clock and link clock. When FEC is enabled the data
 * clock is first adjusted for the FEC overhead.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	/* data M/N: payload bandwidth vs. total link bandwidth (8b per symbol). */
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	/* link M/N: pixel clock vs. link symbol clock. */
	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
7558
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed, preferring the BIOS state on IBX/CPT PCH platforms.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
				      enableddisabled(bios_lvds_use_ssc),
				      enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
7579
7580 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7581 {
7582         if (i915_modparams.panel_use_ssc >= 0)
7583                 return i915_modparams.panel_use_ssc != 0;
7584         return dev_priv->vbt.lvds_use_ssc
7585                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7586 }
7587
7588 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7589 {
7590         return (1 << dpll->n) << 16 | dpll->m2;
7591 }
7592
7593 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7594 {
7595         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7596 }
7597
7598 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7599                                      struct intel_crtc_state *crtc_state,
7600                                      struct dpll *reduced_clock)
7601 {
7602         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7603         u32 fp, fp2 = 0;
7604
7605         if (IS_PINEVIEW(dev_priv)) {
7606                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7607                 if (reduced_clock)
7608                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7609         } else {
7610                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7611                 if (reduced_clock)
7612                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7613         }
7614
7615         crtc_state->dpll_hw_state.fp0 = fp;
7616
7617         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7618             reduced_clock) {
7619                 crtc_state->dpll_hw_state.fp1 = fp2;
7620         } else {
7621                 crtc_state->dpll_hw_state.fp1 = fp;
7622         }
7623 }
7624
/*
 * Recalibrate the PLLB opamp via DPIO read-modify-write cycles.
 * NOTE(review): the specific register values below are hardware tuning
 * constants; their meaning is not derivable from this file — confirm
 * against the platform DPIO documentation before changing.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7653
/* Write the data/link M/N values into the PCH transcoder registers. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* DATA_M1 also carries the TU size in its upper bits. */
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7666
7667 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7668                                  enum transcoder transcoder)
7669 {
7670         if (IS_HASWELL(dev_priv))
7671                 return transcoder == TRANSCODER_EDP;
7672
7673         /*
7674          * Strictly speaking some registers are available before
7675          * gen7, but we only support DRRS on gen7+
7676          */
7677         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7678 }
7679
/*
 * Write the data/link M/N values into the CPU transcoder registers,
 * using the per-transcoder registers on gen5+ and the per-pipe G4X
 * registers on older hardware. The M2/N2 set is programmed only where
 * the transcoder has it and DRRS is in use.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		/* DATA_M1 also carries the TU size in its upper bits. */
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7713
7714 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7715 {
7716         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7717
7718         if (m_n == M1_N1) {
7719                 dp_m_n = &crtc_state->dp_m_n;
7720                 dp_m2_n2 = &crtc_state->dp_m2_n2;
7721         } else if (m_n == M2_N2) {
7722
7723                 /*
7724                  * M2_N2 registers are not supported. Hence m2_n2 divider value
7725                  * needs to be programmed into M1_N1.
7726                  */
7727                 dp_m_n = &crtc_state->dp_m2_n2;
7728         } else {
7729                 DRM_ERROR("Unsupported divider value\n");
7730                 return;
7731         }
7732
7733         if (crtc_state->has_pch_encoder)
7734                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7735         else
7736                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7737 }
7738
7739 static void vlv_compute_dpll(struct intel_crtc *crtc,
7740                              struct intel_crtc_state *pipe_config)
7741 {
7742         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7743                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7744         if (crtc->pipe != PIPE_A)
7745                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7746
7747         /* DPLL not used with DSI, but still need the rest set up */
7748         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7749                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7750                         DPLL_EXT_BUFFER_ENABLE_VLV;
7751
7752         pipe_config->dpll_hw_state.dpll_md =
7753                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7754 }
7755
7756 static void chv_compute_dpll(struct intel_crtc *crtc,
7757                              struct intel_crtc_state *pipe_config)
7758 {
7759         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7760                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7761         if (crtc->pipe != PIPE_A)
7762                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7763
7764         /* DPLL not used with DSI, but still need the rest set up */
7765         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7766                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7767
7768         pipe_config->dpll_hw_state.dpll_md =
7769                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7770 }
7771
/*
 * Program the VLV DPIO PLL dividers and analog tuning values for
 * @crtc's pipe per @pipe_config. Only the refclk is enabled in the
 * DPLL register here (VCO/buffer enable bits are masked off); the
 * actual PLL enable happens later in vlv_enable_pll(). The magic
 * DPIO constants come from the eDP/HDMI DPIO driver/vbios notes.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers first, then the same value with calibration enabled */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock setup; extra bit set for DP outputs */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
7871
/*
 * Program the CHV DPIO PLL dividers, lock-detect threshold and loop
 * filter for @crtc's pipe per @pipe_config. The DPLL register only has
 * refclk/SSC enabled here (DPLL_VCO_ENABLE is masked off); the actual
 * PLL enable is done later by chv_enable_pll(). The m2 divider carries
 * a 22-bit fractional part which is split out into bestm2_frac.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 is stored with a 22-bit fractional part; split it up */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients depend on the VCO frequency */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
7976
7977 /**
7978  * vlv_force_pll_on - forcibly enable just the PLL
7979  * @dev_priv: i915 private structure
7980  * @pipe: pipe PLL to enable
7981  * @dpll: PLL configuration
7982  *
7983  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7984  * in cases where we need the PLL enabled even when @pipe is not going to
7985  * be enabled.
7986  */
7987 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7988                      const struct dpll *dpll)
7989 {
7990         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7991         struct intel_crtc_state *pipe_config;
7992
7993         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7994         if (!pipe_config)
7995                 return -ENOMEM;
7996
7997         pipe_config->base.crtc = &crtc->base;
7998         pipe_config->pixel_multiplier = 1;
7999         pipe_config->dpll = *dpll;
8000
8001         if (IS_CHERRYVIEW(dev_priv)) {
8002                 chv_compute_dpll(crtc, pipe_config);
8003                 chv_prepare_pll(crtc, pipe_config);
8004                 chv_enable_pll(crtc, pipe_config);
8005         } else {
8006                 vlv_compute_dpll(crtc, pipe_config);
8007                 vlv_prepare_pll(crtc, pipe_config);
8008                 vlv_enable_pll(crtc, pipe_config);
8009         }
8010
8011         kfree(pipe_config);
8012
8013         return 0;
8014 }
8015
8016 /**
8017  * vlv_force_pll_off - forcibly disable just the PLL
8018  * @dev_priv: i915 private structure
8019  * @pipe: pipe PLL to disable
8020  *
8021  * Disable the PLL for @pipe. To be used in cases where we need
8022  * the PLL enabled even when @pipe is not going to be enabled.
8023  */
8024 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8025 {
8026         if (IS_CHERRYVIEW(dev_priv))
8027                 chv_disable_pll(dev_priv, pipe);
8028         else
8029                 vlv_disable_pll(dev_priv, pipe);
8030 }
8031
/*
 * Compute the gen3/gen4-style DPLL control register value for
 * @crtc_state and store it (plus DPLL_MD on gen4+) in
 * crtc_state->dpll_hw_state. The raw divider values come from
 * crtc_state->dpll; the FP dividers themselves are programmed via
 * i9xx_update_pll_dividers(). @reduced_clock optionally supplies
 * dividers for the reduced (FP1) clock on G4X.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Pixel multiplier lives in the DPLL itself on these platforms */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also needs the high speed mode bit */
	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* Gen4+ has a separate MD register for the pixel multiplier */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8104
/*
 * Compute the gen2-style DPLL control register value for @crtc_state
 * and store it in crtc_state->dpll_hw_state. Divider values come from
 * crtc_state->dpll; the FP dividers are programmed via
 * i9xx_update_pll_dividers().
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to "1" in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity we simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* Reference clock selection: SSC only for LVDS when enabled in VBT */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8154
/*
 * Program the pipe/transcoder timing registers (H/V total, blank, sync
 * and vsyncshift) for @crtc_state. All register fields are "value - 1"
 * encoded. Interlaced modes need the vtotal/vblank_end adjusted and a
 * vsyncshift computed, since the hardware adds the extra halflines
 * itself.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
8216
8217 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8218 {
8219         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8220         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8221         enum pipe pipe = crtc->pipe;
8222
8223         /* pipesrc controls the size that is scaled from, which should
8224          * always be the user's requested size.
8225          */
8226         I915_WRITE(PIPESRC(pipe),
8227                    ((crtc_state->pipe_src_w - 1) << 16) |
8228                    (crtc_state->pipe_src_h - 1));
8229 }
8230
8231 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8232 {
8233         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
8234         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8235
8236         if (IS_GEN(dev_priv, 2))
8237                 return false;
8238
8239         if (INTEL_GEN(dev_priv) >= 9 ||
8240             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8241                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8242         else
8243                 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8244 }
8245
8246 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8247                                    struct intel_crtc_state *pipe_config)
8248 {
8249         struct drm_device *dev = crtc->base.dev;
8250         struct drm_i915_private *dev_priv = to_i915(dev);
8251         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8252         u32 tmp;
8253
8254         tmp = I915_READ(HTOTAL(cpu_transcoder));
8255         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8256         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8257
8258         if (!transcoder_is_dsi(cpu_transcoder)) {
8259                 tmp = I915_READ(HBLANK(cpu_transcoder));
8260                 pipe_config->base.adjusted_mode.crtc_hblank_start =
8261                                                         (tmp & 0xffff) + 1;
8262                 pipe_config->base.adjusted_mode.crtc_hblank_end =
8263                                                 ((tmp >> 16) & 0xffff) + 1;
8264         }
8265         tmp = I915_READ(HSYNC(cpu_transcoder));
8266         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8267         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8268
8269         tmp = I915_READ(VTOTAL(cpu_transcoder));
8270         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8271         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8272
8273         if (!transcoder_is_dsi(cpu_transcoder)) {
8274                 tmp = I915_READ(VBLANK(cpu_transcoder));
8275                 pipe_config->base.adjusted_mode.crtc_vblank_start =
8276                                                         (tmp & 0xffff) + 1;
8277                 pipe_config->base.adjusted_mode.crtc_vblank_end =
8278                                                 ((tmp >> 16) & 0xffff) + 1;
8279         }
8280         tmp = I915_READ(VSYNC(cpu_transcoder));
8281         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8282         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8283
8284         if (intel_pipe_is_interlaced(pipe_config)) {
8285                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8286                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
8287                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
8288         }
8289 }
8290
8291 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8292                                     struct intel_crtc_state *pipe_config)
8293 {
8294         struct drm_device *dev = crtc->base.dev;
8295         struct drm_i915_private *dev_priv = to_i915(dev);
8296         u32 tmp;
8297
8298         tmp = I915_READ(PIPESRC(crtc->pipe));
8299         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8300         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8301
8302         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8303         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
8304 }
8305
8306 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8307                                  struct intel_crtc_state *pipe_config)
8308 {
8309         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8310         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8311         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8312         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
8313
8314         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8315         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8316         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8317         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
8318
8319         mode->flags = pipe_config->base.adjusted_mode.flags;
8320         mode->type = DRM_MODE_TYPE_DRIVER;
8321
8322         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
8323
8324         mode->hsync = drm_mode_hsync(mode);
8325         mode->vrefresh = drm_mode_vrefresh(mode);
8326         drm_mode_set_name(mode);
8327 }
8328
/*
 * Program PIPECONF for gen2-gen4/VLV/CHV pipes from @crtc_state:
 * double wide, dither, bpc, interlace mode, color range and gamma
 * mode. The write is posted to make sure it lands before subsequent
 * pipe programming.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(crtc->pipe));
}
8387
8388 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8389                                    struct intel_crtc_state *crtc_state)
8390 {
8391         struct drm_device *dev = crtc->base.dev;
8392         struct drm_i915_private *dev_priv = to_i915(dev);
8393         const struct intel_limit *limit;
8394         int refclk = 48000;
8395
8396         memset(&crtc_state->dpll_hw_state, 0,
8397                sizeof(crtc_state->dpll_hw_state));
8398
8399         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8400                 if (intel_panel_use_ssc(dev_priv)) {
8401                         refclk = dev_priv->vbt.lvds_ssc_freq;
8402                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8403                 }
8404
8405                 limit = &intel_limits_i8xx_lvds;
8406         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8407                 limit = &intel_limits_i8xx_dvo;
8408         } else {
8409                 limit = &intel_limits_i8xx_dac;
8410         }
8411
8412         if (!crtc_state->clock_set &&
8413             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8414                                  refclk, NULL, &crtc_state->dpll)) {
8415                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8416                 return -EINVAL;
8417         }
8418
8419         i8xx_compute_dpll(crtc, crtc_state, NULL);
8420
8421         return 0;
8422 }
8423
8424 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8425                                   struct intel_crtc_state *crtc_state)
8426 {
8427         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8428         const struct intel_limit *limit;
8429         int refclk = 96000;
8430
8431         memset(&crtc_state->dpll_hw_state, 0,
8432                sizeof(crtc_state->dpll_hw_state));
8433
8434         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8435                 if (intel_panel_use_ssc(dev_priv)) {
8436                         refclk = dev_priv->vbt.lvds_ssc_freq;
8437                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8438                 }
8439
8440                 if (intel_is_dual_link_lvds(dev_priv))
8441                         limit = &intel_limits_g4x_dual_channel_lvds;
8442                 else
8443                         limit = &intel_limits_g4x_single_channel_lvds;
8444         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8445                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8446                 limit = &intel_limits_g4x_hdmi;
8447         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8448                 limit = &intel_limits_g4x_sdvo;
8449         } else {
8450                 /* The option is for other outputs */
8451                 limit = &intel_limits_i9xx_sdvo;
8452         }
8453
8454         if (!crtc_state->clock_set &&
8455             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8456                                 refclk, NULL, &crtc_state->dpll)) {
8457                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8458                 return -EINVAL;
8459         }
8460
8461         i9xx_compute_dpll(crtc, crtc_state, NULL);
8462
8463         return 0;
8464 }
8465
8466 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8467                                   struct intel_crtc_state *crtc_state)
8468 {
8469         struct drm_device *dev = crtc->base.dev;
8470         struct drm_i915_private *dev_priv = to_i915(dev);
8471         const struct intel_limit *limit;
8472         int refclk = 96000;
8473
8474         memset(&crtc_state->dpll_hw_state, 0,
8475                sizeof(crtc_state->dpll_hw_state));
8476
8477         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8478                 if (intel_panel_use_ssc(dev_priv)) {
8479                         refclk = dev_priv->vbt.lvds_ssc_freq;
8480                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8481                 }
8482
8483                 limit = &intel_limits_pineview_lvds;
8484         } else {
8485                 limit = &intel_limits_pineview_sdvo;
8486         }
8487
8488         if (!crtc_state->clock_set &&
8489             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8490                                 refclk, NULL, &crtc_state->dpll)) {
8491                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8492                 return -EINVAL;
8493         }
8494
8495         i9xx_compute_dpll(crtc, crtc_state, NULL);
8496
8497         return 0;
8498 }
8499
8500 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8501                                    struct intel_crtc_state *crtc_state)
8502 {
8503         struct drm_device *dev = crtc->base.dev;
8504         struct drm_i915_private *dev_priv = to_i915(dev);
8505         const struct intel_limit *limit;
8506         int refclk = 96000;
8507
8508         memset(&crtc_state->dpll_hw_state, 0,
8509                sizeof(crtc_state->dpll_hw_state));
8510
8511         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8512                 if (intel_panel_use_ssc(dev_priv)) {
8513                         refclk = dev_priv->vbt.lvds_ssc_freq;
8514                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8515                 }
8516
8517                 limit = &intel_limits_i9xx_lvds;
8518         } else {
8519                 limit = &intel_limits_i9xx_sdvo;
8520         }
8521
8522         if (!crtc_state->clock_set &&
8523             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8524                                  refclk, NULL, &crtc_state->dpll)) {
8525                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8526                 return -EINVAL;
8527         }
8528
8529         i9xx_compute_dpll(crtc, crtc_state, NULL);
8530
8531         return 0;
8532 }
8533
8534 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8535                                   struct intel_crtc_state *crtc_state)
8536 {
8537         int refclk = 100000;
8538         const struct intel_limit *limit = &intel_limits_chv;
8539
8540         memset(&crtc_state->dpll_hw_state, 0,
8541                sizeof(crtc_state->dpll_hw_state));
8542
8543         if (!crtc_state->clock_set &&
8544             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8545                                 refclk, NULL, &crtc_state->dpll)) {
8546                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8547                 return -EINVAL;
8548         }
8549
8550         chv_compute_dpll(crtc, crtc_state);
8551
8552         return 0;
8553 }
8554
8555 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8556                                   struct intel_crtc_state *crtc_state)
8557 {
8558         int refclk = 100000;
8559         const struct intel_limit *limit = &intel_limits_vlv;
8560
8561         memset(&crtc_state->dpll_hw_state, 0,
8562                sizeof(crtc_state->dpll_hw_state));
8563
8564         if (!crtc_state->clock_set &&
8565             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8566                                 refclk, NULL, &crtc_state->dpll)) {
8567                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8568                 return -EINVAL;
8569         }
8570
8571         vlv_compute_dpll(crtc, crtc_state);
8572
8573         return 0;
8574 }
8575
8576 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8577 {
8578         if (IS_I830(dev_priv))
8579                 return false;
8580
8581         return INTEL_GEN(dev_priv) >= 4 ||
8582                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8583 }
8584
/*
 * Read out the GMCH panel fitter state for @crtc into
 * @pipe_config->gmch_pfit.
 *
 * Fills in the pfit registers only when the fitter exists on this
 * platform, is currently enabled, and is routed to this pipe;
 * otherwise pipe_config is left untouched.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 tmp;

        if (!i9xx_has_pfit(dev_priv))
                return;

        tmp = I915_READ(PFIT_CONTROL);
        if (!(tmp & PFIT_ENABLE))
                return;

        /* Check whether the pfit is attached to our pipe. */
        if (INTEL_GEN(dev_priv) < 4) {
                /* Pre-gen4 the single pfit only serves pipe B */
                if (crtc->pipe != PIPE_B)
                        return;
        } else {
                /* Gen4+ encodes the attached pipe in PFIT_CONTROL */
                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
                        return;
        }

        pipe_config->gmch_pfit.control = tmp;
        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
8610
/*
 * Reconstruct pipe_config->port_clock on Valleyview by reading the
 * currently programmed DPLL dividers from the DPIO sideband.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000; /* fixed 100 MHz reference, in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        vlv_dpio_put(dev_priv);

        /* Unpack the m1/m2/n/p1/p2 divider fields from PLL_DW3 */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8637
/*
 * Read out the framebuffer configuration the firmware/BIOS left
 * programmed on the primary plane, so the boot framebuffer can be
 * inherited (e.g. for flicker-free handover).
 *
 * On success, plane_config->fb points at a freshly allocated
 * intel_framebuffer describing the hardware state; ownership passes
 * to the caller. If the plane is disabled or allocation fails,
 * plane_config is left without an fb.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to take over if the plane isn't enabled */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = I915_READ(DSPCNTR(i9xx_plane));

        /* Tiling and rotation bits only exist on gen4+ */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }

                if (val & DISPPLANE_ROTATE_180)
                        plane_config->rotation = DRM_MODE_ROTATE_180;
        }

        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
            val & DISPPLANE_MIRROR)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /* Surface base/offset registers differ per generation */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = I915_READ(DSPOFFSET(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(i9xx_plane));
                else
                        offset = I915_READ(DSPLINOFF(i9xx_plane));
                /* DSPSURF holds a 4K-aligned surface address */
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = I915_READ(DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* PIPESRC encodes (width - 1) << 16 | (height - 1) */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        /* Estimated allocation size of the firmware framebuffer */
        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      crtc->base.name, plane->base.name, fb->width, fb->height,
                      fb->format->cpp[0] * 8, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
}
8720
/*
 * Reconstruct pipe_config->port_clock on Cherryview by reading the
 * currently programmed DPLL dividers from the DPIO sideband.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000; /* fixed 100 MHz reference, in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        vlv_dpio_put(dev_priv);

        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        /*
         * m2 integer part comes from PLL_DW0 shifted above the
         * optional 22-bit fraction held in PLL_DW2.
         */
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8754
8755 static enum intel_output_format
8756 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
8757 {
8758         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8759         u32 tmp;
8760
8761         tmp = I915_READ(PIPEMISC(crtc->pipe));
8762
8763         if (tmp & PIPEMISC_YUV420_ENABLE) {
8764                 /* We support 4:2:0 in full blend mode only */
8765                 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
8766
8767                 return INTEL_OUTPUT_FORMAT_YCBCR420;
8768         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8769                 return INTEL_OUTPUT_FORMAT_YCBCR444;
8770         } else {
8771                 return INTEL_OUTPUT_FORMAT_RGB;
8772         }
8773 }
8774
8775 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8776 {
8777         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8778         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8779         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8780         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8781         u32 tmp;
8782
8783         tmp = I915_READ(DSPCNTR(i9xx_plane));
8784
8785         if (tmp & DISPPLANE_GAMMA_ENABLE)
8786                 crtc_state->gamma_enable = true;
8787
8788         if (!HAS_GMCH(dev_priv) &&
8789             tmp & DISPPLANE_PIPE_CSC_ENABLE)
8790                 crtc_state->csc_enable = true;
8791 }
8792
/*
 * Read out the full hardware state of a GMCH-era (gen2-4, VLV, CHV)
 * pipe into @pipe_config.
 *
 * Takes a power domain reference for the pipe so its registers can be
 * accessed safely. Returns false if the power well is off or the pipe
 * is disabled; returns true once @pipe_config has been populated.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        /* Defaults before reading anything out */
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only g4x and later have bpc controls in PIPECONF */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Pixel multiplier location depends on the generation */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
8910
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake based on the outputs present (LVDS, CPU eDP), whether a
 * CK505 external clock source is in use, and whether spread spectrum
 * (SSC) should be applied.
 *
 * The desired final register value is computed first; if it differs
 * from the current one, the individual clock sources are switched one
 * step at a time with 200us settling delays in between, since the
 * ordering of enabling/disabling sources matters. The closing
 * BUG_ON() asserts the stepwise sequence reached exactly the
 * precomputed final state.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        if (HAS_PCH_IBX(dev_priv)) {
                /* VBT says whether an external CK505 clock chip is used */
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                u32 temp = I915_READ(PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                      has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = I915_READ(PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                /* Keep SSC alive for any DPLL still referencing it */
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                DRM_DEBUG_KMS("Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
                DRM_DEBUG_KMS("Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                if (!using_ssc_source) {
                        DRM_DEBUG_KMS("Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        I915_WRITE(PCH_DREF_CONTROL, val);
                        POSTING_READ(PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        BUG_ON(val != final);
}
9077
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset
 * control bit, wait for the status bit to report the reset asserted,
 * then de-assert and wait for the status bit to clear. Timeouts are
 * only logged, not propagated.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                DRM_ERROR("FDI mPHY reset assert timeout\n");

        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
9098
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers through the SBI sideband.
 * The offsets and values are opaque magic numbers mandated by the
 * WaMPhyProgramming:hsw workaround; most registers come in
 * 0x20xx/0x21xx pairs that receive identical values (presumably one
 * per channel/lane pair -- not documented here, see Bspec).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9173
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * NOTE: the SBI writes below follow the BSpec-mandated order; do not
 * reorder them.
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible parameter combinations before touching HW. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* All SBI accesses must be serialized by the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/*
	 * Un-gate the SSC block but keep the path in the alternate
	 * (PATHALT) state while it comes up.
	 */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	/* Settle time from the BSpec sequence referenced above. */
	udelay(24);

	if (with_spread) {
		/* Switch from the alternate path to the spread clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* LPT-LP uses SBI_GEN0; other LPT variants use SBI_DBUFF0. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9218
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* LPT-LP uses SBI_GEN0; other LPT variants use SBI_DBUFF0. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* Only shut the SSC block down if it isn't already disabled. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/*
		 * Move onto the alternate path first (with a settle
		 * delay) before gating the SSC block.
		 */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9244
/* Map a bend value in [-50, 50] (steps of 5) to a table index in [0, 20]. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * Per-bend values for the low 16 bits of SBI_SSCDIVINTPHASE, indexed by
 * BEND_IDX(steps). Consumed by lpt_bend_clkout_dp() below; adjacent step
 * values share an entry, with odd multiples of 5 distinguished by the
 * dither phase programmed separately.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9270
9271 /*
9272  * Bend CLKOUT_DP
9273  * steps -50 to 50 inclusive, in steps of 5
9274  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9275  * change in clock period = -(steps / 10) * 5.787 ps
9276  */
9277 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9278 {
9279         u32 tmp;
9280         int idx = BEND_IDX(steps);
9281
9282         if (WARN_ON(steps % 5 != 0))
9283                 return;
9284
9285         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9286                 return;
9287
9288         mutex_lock(&dev_priv->sb_lock);
9289
9290         if (steps % 10 != 0)
9291                 tmp = 0xAAAAAAAB;
9292         else
9293                 tmp = 0x00000000;
9294         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9295
9296         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9297         tmp &= 0xffff0000;
9298         tmp |= sscdivintphase[idx];
9299         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9300
9301         mutex_unlock(&dev_priv->sb_lock);
9302 }
9303
9304 #undef BEND_IDX
9305
9306 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9307 {
9308         u32 fuse_strap = I915_READ(FUSE_STRAP);
9309         u32 ctl = I915_READ(SPLL_CTL);
9310
9311         if ((ctl & SPLL_PLL_ENABLE) == 0)
9312                 return false;
9313
9314         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9315             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9316                 return true;
9317
9318         if (IS_BROADWELL(dev_priv) &&
9319             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9320                 return true;
9321
9322         return false;
9323 }
9324
9325 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9326                                enum intel_dpll_id id)
9327 {
9328         u32 fuse_strap = I915_READ(FUSE_STRAP);
9329         u32 ctl = I915_READ(WRPLL_CTL(id));
9330
9331         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9332                 return false;
9333
9334         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9335                 return true;
9336
9337         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9338             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9339             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9340                 return true;
9341
9342         return false;
9343 }
9344
9345 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9346 {
9347         struct intel_encoder *encoder;
9348         bool pch_ssc_in_use = false;
9349         bool has_fdi = false;
9350
9351         for_each_intel_encoder(&dev_priv->drm, encoder) {
9352                 switch (encoder->type) {
9353                 case INTEL_OUTPUT_ANALOG:
9354                         has_fdi = true;
9355                         break;
9356                 default:
9357                         break;
9358                 }
9359         }
9360
9361         /*
9362          * The BIOS may have decided to use the PCH SSC
9363          * reference so we must not disable it until the
9364          * relevant PLLs have stopped relying on it. We'll
9365          * just leave the PCH SSC reference enabled in case
9366          * any active PLL is using it. It will get disabled
9367          * after runtime suspend if we don't have FDI.
9368          *
9369          * TODO: Move the whole reference clock handling
9370          * to the modeset sequence proper so that we can
9371          * actually enable/disable/reconfigure these things
9372          * safely. To do that we need to introduce a real
9373          * clock hierarchy. That would also allow us to do
9374          * clock bending finally.
9375          */
9376         if (spll_uses_pch_ssc(dev_priv)) {
9377                 DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9378                 pch_ssc_in_use = true;
9379         }
9380
9381         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9382                 DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9383                 pch_ssc_in_use = true;
9384         }
9385
9386         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9387                 DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9388                 pch_ssc_in_use = true;
9389         }
9390
9391         if (pch_ssc_in_use)
9392                 return;
9393
9394         if (has_fdi) {
9395                 lpt_bend_clkout_dp(dev_priv, 0);
9396                 lpt_enable_clkout_dp(dev_priv, true, true);
9397         } else {
9398                 lpt_disable_clkout_dp(dev_priv);
9399         }
9400 }
9401
9402 /*
9403  * Initialize reference clocks when the driver loads
9404  */
9405 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9406 {
9407         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9408                 ironlake_init_pch_refclk(dev_priv);
9409         else if (HAS_PCH_LPT(dev_priv))
9410                 lpt_init_pch_refclk(dev_priv);
9411 }
9412
/*
 * Program PIPECONF for the pipe from the committed crtc state: pipe
 * bpp, dithering, interlace mode, color range, output colorspace and
 * gamma mode.
 */
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	WARN_ON(crtc_state->limited_color_range &&
		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	I915_WRITE(PIPECONF(pipe), val);
	/* Posting read flushes the write before we return. */
	POSTING_READ(PIPECONF(pipe));
}
9466
9467 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9468 {
9469         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9470         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9471         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9472         u32 val = 0;
9473
9474         if (IS_HASWELL(dev_priv) && crtc_state->dither)
9475                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9476
9477         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9478                 val |= PIPECONF_INTERLACED_ILK;
9479         else
9480                 val |= PIPECONF_PROGRESSIVE;
9481
9482         if (IS_HASWELL(dev_priv) &&
9483             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9484                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9485
9486         I915_WRITE(PIPECONF(cpu_transcoder), val);
9487         POSTING_READ(PIPECONF(cpu_transcoder));
9488 }
9489
/*
 * Program PIPEMISC for the pipe from the committed crtc state:
 * dithering bpc/mode, output colorspace (YCbCr 4:4:4 / 4:2:0) and, on
 * gen11+, the HDR precision bit.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		/* Unexpected bpp - log it, but keep programming the rest. */
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 additionally needs the dedicated enable + blend mode bits. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * Gen11+: set the HDR precision bit only when every active
	 * plane other than the cursor is an HDR-capable plane.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	I915_WRITE(PIPEMISC(crtc->pipe), val);
}
9532
/*
 * Read back the pipe bpp (bits per pixel over all components) from the
 * PIPEMISC dither bpc field. Returns 0 for an unrecognized field value.
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = I915_READ(PIPEMISC(crtc->pipe));

	/* Inverse of the mapping programmed by bdw_set_pipemisc(). */
	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
	case PIPEMISC_DITHER_6_BPC:
		return 18;
	case PIPEMISC_DITHER_8_BPC:
		return 24;
	case PIPEMISC_DITHER_10_BPC:
		return 30;
	case PIPEMISC_DITHER_12_BPC:
		return 36;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
9554
9555 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9556 {
9557         /*
9558          * Account for spread spectrum to avoid
9559          * oversubscribing the link. Max center spread
9560          * is 2.5%; use 5% for safety's sake.
9561          */
9562         u32 bps = target_clock * bpp * 21 / 20;
9563         return DIV_ROUND_UP(bps, link_bw * 8);
9564 }
9565
/*
 * Returns true when the PLL's feedback divider m is below factor * n,
 * in which case the caller sets FP_CB_TUNE (see ironlake_compute_dpll()).
 */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
9570
/*
 * Compute the ILK DPLL, FP0 and FP1 register values from the divider
 * values already stored in crtc_state->dpll, and stash them in
 * crtc_state->dpll_hw_state. reduced_clock, when non-NULL, supplies
 * alternate dividers for FP1.
 */
static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 gets the reduced clock dividers, or a copy of FP0. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference selection: SSC only for LVDS panels that want it. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9672
/*
 * Compute clock/PLL state for an ILK-style crtc: pick the limit table
 * (based on LVDS/dual-link/SSC refclk), solve for dividers, fill in
 * dpll_hw_state and reserve a shared DPLL. Returns 0 on success or
 * -EINVAL when no divider solution or no free PLL exists.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* The limit tables differ per link config and refclk. */
		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Respect dividers already fixed by the caller (clock_set). */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
			      pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
9728
/*
 * Read back the PCH transcoder M1/N1 link and data values for this
 * pipe, including the TU size packed into the high bits of DATA_M1.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	/* DATA_M1 carries both the M value and the TU size field. */
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9744
/*
 * Read back the CPU transcoder M/N link and data values. On gen5+
 * the registers are per-transcoder and, where the transcoder has
 * them, the second set (M2/N2) is read into m2_n2. On older
 * platforms the per-pipe G4X registers are used instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		/* DATA_M1 carries both the M value and the TU size field. */
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9781
9782 void intel_dp_get_m_n(struct intel_crtc *crtc,
9783                       struct intel_crtc_state *pipe_config)
9784 {
9785         if (pipe_config->has_pch_encoder)
9786                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9787         else
9788                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9789                                              &pipe_config->dp_m_n,
9790                                              &pipe_config->dp_m2_n2);
9791 }
9792
/*
 * Read back the FDI M/N values from the CPU transcoder; FDI has no
 * second M2/N2 set, hence the NULL.
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9799
9800 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9801                                     struct intel_crtc_state *pipe_config)
9802 {
9803         struct drm_device *dev = crtc->base.dev;
9804         struct drm_i915_private *dev_priv = to_i915(dev);
9805         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9806         u32 ps_ctrl = 0;
9807         int id = -1;
9808         int i;
9809
9810         /* find scaler attached to this pipe */
9811         for (i = 0; i < crtc->num_scalers; i++) {
9812                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9813                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9814                         id = i;
9815                         pipe_config->pch_pfit.enabled = true;
9816                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9817                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9818                         scaler_state->scalers[i].in_use = true;
9819                         break;
9820                 }
9821         }
9822
9823         scaler_state->scaler_id = id;
9824         if (id >= 0) {
9825                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9826         } else {
9827                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9828         }
9829 }
9830
/*
 * Reconstruct the BIOS/GOP-programmed primary plane configuration
 * (format, modifier, rotation, base, size, stride) from the hardware
 * so the boot framebuffer can be inherited. Allocates an
 * intel_framebuffer which is handed to the caller via plane_config->fb
 * (freed here only on the error path).
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane isn't enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* The format field grew on gen11. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode moved from PLANE_CTL to PLANE_COLOR_CTL on glk/gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hw tiling field into a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why the 90/270 swap.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Surface base address is 4k aligned; low bits are flags. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores width-1/height-1. */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* Stride register is in units that depend on format/modifier. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
9957
/*
 * Read out the PCH panel fitter state for @crtc into @pipe_config.
 * Position and size are only read when the fitter is enabled.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp;

        tmp = I915_READ(PF_CTL(crtc->pipe));

        if (tmp & PF_ENABLE) {
                pipe_config->pch_pfit.enabled = true;
                pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
                pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

                /* We currently do not free assignments of panel fitters on
                 * ivb/hsw (since we don't use the higher upscaling modes which
                 * differentiates them) so just WARN about this case for now. */
                if (IS_GEN(dev_priv, 7)) {
                        WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
                                PF_PIPE_SEL_IVB(crtc->pipe));
                }
        }
}
9981
9982 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9983                                      struct intel_crtc_state *pipe_config)
9984 {
9985         struct drm_device *dev = crtc->base.dev;
9986         struct drm_i915_private *dev_priv = to_i915(dev);
9987         enum intel_display_power_domain power_domain;
9988         intel_wakeref_t wakeref;
9989         u32 tmp;
9990         bool ret;
9991
9992         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9993         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9994         if (!wakeref)
9995                 return false;
9996
9997         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9998         pipe_config->shared_dpll = NULL;
9999
10000         ret = false;
10001         tmp = I915_READ(PIPECONF(crtc->pipe));
10002         if (!(tmp & PIPECONF_ENABLE))
10003                 goto out;
10004
10005         switch (tmp & PIPECONF_BPC_MASK) {
10006         case PIPECONF_6BPC:
10007                 pipe_config->pipe_bpp = 18;
10008                 break;
10009         case PIPECONF_8BPC:
10010                 pipe_config->pipe_bpp = 24;
10011                 break;
10012         case PIPECONF_10BPC:
10013                 pipe_config->pipe_bpp = 30;
10014                 break;
10015         case PIPECONF_12BPC:
10016                 pipe_config->pipe_bpp = 36;
10017                 break;
10018         default:
10019                 break;
10020         }
10021
10022         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10023                 pipe_config->limited_color_range = true;
10024
10025         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10026         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10027         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10028                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10029                 break;
10030         default:
10031                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10032                 break;
10033         }
10034
10035         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10036                 PIPECONF_GAMMA_MODE_SHIFT;
10037
10038         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10039
10040         i9xx_get_pipe_color_config(pipe_config);
10041         intel_color_get_config(pipe_config);
10042
10043         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10044                 struct intel_shared_dpll *pll;
10045                 enum intel_dpll_id pll_id;
10046
10047                 pipe_config->has_pch_encoder = true;
10048
10049                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10050                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10051                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10052
10053                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10054
10055                 if (HAS_PCH_IBX(dev_priv)) {
10056                         /*
10057                          * The pipe->pch transcoder and pch transcoder->pll
10058                          * mapping is fixed.
10059                          */
10060                         pll_id = (enum intel_dpll_id) crtc->pipe;
10061                 } else {
10062                         tmp = I915_READ(PCH_DPLL_SEL);
10063                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10064                                 pll_id = DPLL_ID_PCH_PLL_B;
10065                         else
10066                                 pll_id= DPLL_ID_PCH_PLL_A;
10067                 }
10068
10069                 pipe_config->shared_dpll =
10070                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10071                 pll = pipe_config->shared_dpll;
10072
10073                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10074                                                 &pipe_config->dpll_hw_state));
10075
10076                 tmp = pipe_config->dpll_hw_state.dpll;
10077                 pipe_config->pixel_multiplier =
10078                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10079                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10080
10081                 ironlake_pch_clock_get(crtc, pipe_config);
10082         } else {
10083                 pipe_config->pixel_multiplier = 1;
10084         }
10085
10086         intel_get_pipe_timings(crtc, pipe_config);
10087         intel_get_pipe_src_size(crtc, pipe_config);
10088
10089         ironlake_get_pfit_config(crtc, pipe_config);
10090
10091         ret = true;
10092
10093 out:
10094         intel_display_power_put(dev_priv, power_domain, wakeref);
10095
10096         return ret;
10097 }
10098 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10099                                       struct intel_crtc_state *crtc_state)
10100 {
10101         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10102         struct intel_atomic_state *state =
10103                 to_intel_atomic_state(crtc_state->base.state);
10104
10105         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10106             INTEL_GEN(dev_priv) >= 11) {
10107                 struct intel_encoder *encoder =
10108                         intel_get_crtc_new_encoder(state, crtc_state);
10109
10110                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10111                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10112                                       pipe_name(crtc->pipe));
10113                         return -EINVAL;
10114                 }
10115         }
10116
10117         return 0;
10118 }
10119
/*
 * Determine which shared DPLL drives DDI @port on Cannonlake by decoding
 * the DPCLKA_CFGCR0 clock-select field, and record it in @pipe_config.
 */
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                   enum port port,
                                   struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 temp;

        temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
        id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

        /* Only DPLL0..DPLL2 are valid selections for this field. */
        if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
                return;

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10135
/*
 * Determine which DPLL feeds DDI @port on gen11+ and record it in
 * @pipe_config->icl_port_dplls, marking it as the active port DPLL.
 *
 * Combo PHY ports decode the ICL_DPCLKA_CFGCR0 clock select; Type-C
 * ports use either the MG PHY PLL or the TBT PLL depending on
 * DDI_CLK_SEL.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum phy phy = intel_port_to_phy(dev_priv, port);
        enum icl_port_dpll_id port_dpll_id;
        enum intel_dpll_id id;
        u32 temp;

        if (intel_phy_is_combo(dev_priv, phy)) {
                temp = I915_READ(ICL_DPCLKA_CFGCR0) &
                        ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
                id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
                port_dpll_id = ICL_PORT_DPLL_DEFAULT;
        } else if (intel_phy_is_tc(dev_priv, phy)) {
                u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

                if (clk_sel == DDI_CLK_SEL_MG) {
                        id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
                                                                    port));
                        port_dpll_id = ICL_PORT_DPLL_MG_PHY;
                } else {
                        /* Anything else here must be one of the TBT clocks. */
                        WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
                        id = DPLL_ID_ICL_TBTPLL;
                        port_dpll_id = ICL_PORT_DPLL_DEFAULT;
                }
        } else {
                WARN(1, "Invalid port %x\n", port);
                return;
        }

        pipe_config->icl_port_dplls[port_dpll_id].pll =
                intel_get_shared_dpll_by_id(dev_priv, id);

        icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10172
10173 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10174                                 enum port port,
10175                                 struct intel_crtc_state *pipe_config)
10176 {
10177         enum intel_dpll_id id;
10178
10179         switch (port) {
10180         case PORT_A:
10181                 id = DPLL_ID_SKL_DPLL0;
10182                 break;
10183         case PORT_B:
10184                 id = DPLL_ID_SKL_DPLL1;
10185                 break;
10186         case PORT_C:
10187                 id = DPLL_ID_SKL_DPLL2;
10188                 break;
10189         default:
10190                 DRM_ERROR("Incorrect port type\n");
10191                 return;
10192         }
10193
10194         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10195 }
10196
10197 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
10198                                 enum port port,
10199                                 struct intel_crtc_state *pipe_config)
10200 {
10201         enum intel_dpll_id id;
10202         u32 temp;
10203
10204         temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10205         id = temp >> (port * 3 + 1);
10206
10207         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
10208                 return;
10209
10210         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10211 }
10212
/*
 * Determine which PLL drives DDI @port on Haswell/Broadwell by decoding
 * PORT_CLK_SEL, and record it in @pipe_config. Leaves the shared DPLL
 * unset when no clock is selected.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

        switch (ddi_pll_sel) {
        case PORT_CLK_SEL_WRPLL1:
                id = DPLL_ID_WRPLL1;
                break;
        case PORT_CLK_SEL_WRPLL2:
                id = DPLL_ID_WRPLL2;
                break;
        case PORT_CLK_SEL_SPLL:
                id = DPLL_ID_SPLL;
                break;
        case PORT_CLK_SEL_LCPLL_810:
                id = DPLL_ID_LCPLL_810;
                break;
        case PORT_CLK_SEL_LCPLL_1350:
                id = DPLL_ID_LCPLL_1350;
                break;
        case PORT_CLK_SEL_LCPLL_2700:
                id = DPLL_ID_LCPLL_2700;
                break;
        default:
                MISSING_CASE(ddi_pll_sel);
                /* fall through */
        case PORT_CLK_SEL_NONE:
                return;
        }

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10248
/*
 * Find the CPU transcoder feeding @crtc — handling the eDP and DSI
 * transcoders, whose pipe mapping is not fixed — take a runtime PM
 * reference on that transcoder's power domain, and report whether the
 * transcoder is enabled.
 *
 * The acquired wakeref is stored in @wakerefs and the corresponding
 * domain bit is set in @power_domain_mask; the caller is responsible
 * for releasing it.
 *
 * Returns true if the transcoder's PIPECONF reports it enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     u64 *power_domain_mask,
                                     intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        unsigned long panel_transcoder_mask = 0;
        unsigned long enabled_panel_transcoders = 0;
        enum transcoder panel_transcoder;
        intel_wakeref_t wf;
        u32 tmp;

        if (INTEL_GEN(dev_priv) >= 11)
                panel_transcoder_mask |=
                        BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

        if (HAS_TRANSCODER_EDP(dev_priv))
                panel_transcoder_mask |= BIT(TRANSCODER_EDP);

        /*
         * The pipe->transcoder mapping is fixed with the exception of the eDP
         * and DSI transcoders handled below.
         */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

        /*
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
        for_each_set_bit(panel_transcoder,
                         &panel_transcoder_mask,
                         ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
                bool force_thru = false;
                enum pipe trans_pipe;

                tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
                if (!(tmp & TRANS_DDI_FUNC_ENABLE))
                        continue;

                /*
                 * Log all enabled ones, only use the first one.
                 *
                 * FIXME: This won't work for two separate DSI displays.
                 */
                enabled_panel_transcoders |= BIT(panel_transcoder);
                if (enabled_panel_transcoders != BIT(panel_transcoder))
                        continue;

                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
                        WARN(1, "unknown pipe linked to transcoder %s\n",
                             transcoder_name(panel_transcoder));
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                        force_thru = true;
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_pipe = PIPE_A;
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
                        trans_pipe = PIPE_B;
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
                        trans_pipe = PIPE_C;
                        break;
                }

                if (trans_pipe == crtc->pipe) {
                        pipe_config->cpu_transcoder = panel_transcoder;
                        pipe_config->pch_pfit.force_thru = force_thru;
                }
        }

        /*
         * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
         */
        WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
                enabled_panel_transcoders != BIT(TRANSCODER_EDP));

        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        wakerefs[power_domain] = wf;
        *power_domain_mask |= BIT_ULL(power_domain);

        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

        return tmp & PIPECONF_ENABLE;
}
10344
/*
 * Check whether @crtc is driven by one of the BXT DSI transcoders
 * (ports A and C). A runtime PM reference is taken for each DSI
 * transcoder power domain that is inspected and recorded in
 * @wakerefs/@power_domain_mask for the caller to release; on a match,
 * pipe_config->cpu_transcoder is set to the DSI transcoder.
 *
 * Returns true if the pipe is driven by a DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         u64 *power_domain_mask,
                                         intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum transcoder cpu_transcoder;
        intel_wakeref_t wf;
        enum port port;
        u32 tmp;

        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

                wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
                if (!wf)
                        continue;

                wakerefs[power_domain] = wf;
                *power_domain_mask |= BIT_ULL(power_domain);

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                tmp = I915_READ(MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10399
/*
 * Read out which DDI port and shared DPLL are driving
 * @pipe_config->cpu_transcoder, dispatching to the per-platform PLL
 * decoder, and pick up FDI state when the PCH transcoder is in use.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
                                       struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_shared_dpll *pll;
        enum port port;
        u32 tmp;

        tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

        if (INTEL_GEN(dev_priv) >= 12)
                port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
        else
                port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);

        if (INTEL_GEN(dev_priv) >= 11)
                icelake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_BC(dev_priv))
                skylake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_LP(dev_priv))
                bxt_get_ddi_pll(dev_priv, port, pipe_config);
        else
                haswell_get_ddi_pll(dev_priv, port, pipe_config);

        pll = pipe_config->shared_dpll;
        if (pll) {
                WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
                                                &pipe_config->dpll_hw_state));
        }

        /*
         * Haswell has only one FDI/PCH transcoder (A), which is connected
         * to DDI E. So just check whether this pipe is wired to DDI E and
         * whether the PCH transcoder is on.
         */
        if (INTEL_GEN(dev_priv) < 9 &&
            (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;

                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;

                ironlake_get_fdi_m_n_config(crtc, pipe_config);
        }
}
10448
10449 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10450                                     struct intel_crtc_state *pipe_config)
10451 {
10452         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10453         intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10454         enum intel_display_power_domain power_domain;
10455         u64 power_domain_mask;
10456         bool active;
10457
10458         intel_crtc_init_scalers(crtc, pipe_config);
10459
10460         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10461         wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10462         if (!wf)
10463                 return false;
10464
10465         wakerefs[power_domain] = wf;
10466         power_domain_mask = BIT_ULL(power_domain);
10467
10468         pipe_config->shared_dpll = NULL;
10469
10470         active = hsw_get_transcoder_state(crtc, pipe_config,
10471                                           &power_domain_mask, wakerefs);
10472
10473         if (IS_GEN9_LP(dev_priv) &&
10474             bxt_get_dsi_transcoder_state(crtc, pipe_config,
10475                                          &power_domain_mask, wakerefs)) {
10476                 WARN_ON(active);
10477                 active = true;
10478         }
10479
10480         if (!active)
10481                 goto out;
10482
10483         if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10484             INTEL_GEN(dev_priv) >= 11) {
10485                 haswell_get_ddi_port_state(crtc, pipe_config);
10486                 intel_get_pipe_timings(crtc, pipe_config);
10487         }
10488
10489         intel_get_pipe_src_size(crtc, pipe_config);
10490
10491         if (IS_HASWELL(dev_priv)) {
10492                 u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10493
10494                 if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
10495                         pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10496                 else
10497                         pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10498         } else {
10499                 pipe_config->output_format =
10500                         bdw_get_pipemisc_output_format(crtc);
10501
10502                 /*
10503                  * Currently there is no interface defined to
10504                  * check user preference between RGB/YCBCR444
10505                  * or YCBCR420. So the only possible case for
10506                  * YCBCR444 usage is driving YCBCR420 output
10507                  * with LSPCON, when pipe is configured for
10508                  * YCBCR444 output and LSPCON takes care of
10509                  * downsampling it.
10510                  */
10511                 pipe_config->lspcon_downsampling =
10512                         pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
10513         }
10514
10515         pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10516
10517         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10518
10519         if (INTEL_GEN(dev_priv) >= 9) {
10520                 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10521
10522                 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10523                         pipe_config->gamma_enable = true;
10524
10525                 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10526                         pipe_config->csc_enable = true;
10527         } else {
10528                 i9xx_get_pipe_color_config(pipe_config);
10529         }
10530
10531         intel_color_get_config(pipe_config);
10532
10533         power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10534         WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10535
10536         wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10537         if (wf) {
10538                 wakerefs[power_domain] = wf;
10539                 power_domain_mask |= BIT_ULL(power_domain);
10540
10541                 if (INTEL_GEN(dev_priv) >= 9)
10542                         skylake_get_pfit_config(crtc, pipe_config);
10543                 else
10544                         ironlake_get_pfit_config(crtc, pipe_config);
10545         }
10546
10547         if (hsw_crtc_supports_ips(crtc)) {
10548                 if (IS_HASWELL(dev_priv))
10549                         pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10550                 else {
10551                         /*
10552                          * We cannot readout IPS state on broadwell, set to
10553                          * true so we can set it to a defined state on first
10554                          * commit.
10555                          */
10556                         pipe_config->ips_enabled = true;
10557                 }
10558         }
10559
10560         if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10561             !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10562                 pipe_config->pixel_multiplier =
10563                         I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10564         } else {
10565                 pipe_config->pixel_multiplier = 1;
10566         }
10567
10568 out:
10569         for_each_power_domain(power_domain, power_domain_mask)
10570                 intel_display_power_put(dev_priv,
10571                                         power_domain, wakerefs[power_domain]);
10572
10573         return active;
10574 }
10575
10576 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10577 {
10578         struct drm_i915_private *dev_priv =
10579                 to_i915(plane_state->base.plane->dev);
10580         const struct drm_framebuffer *fb = plane_state->base.fb;
10581         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10582         u32 base;
10583
10584         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10585                 base = obj->phys_handle->busaddr;
10586         else
10587                 base = intel_plane_ggtt_offset(plane_state);
10588
10589         base += plane_state->color_plane[0].offset;
10590
10591         /* ILK+ do this automagically */
10592         if (HAS_GMCH(dev_priv) &&
10593             plane_state->base.rotation & DRM_MODE_ROTATE_180)
10594                 base += (plane_state->base.crtc_h *
10595                          plane_state->base.crtc_w - 1) * fb->format->cpp[0];
10596
10597         return base;
10598 }
10599
10600 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10601 {
10602         int x = plane_state->base.crtc_x;
10603         int y = plane_state->base.crtc_y;
10604         u32 pos = 0;
10605
10606         if (x < 0) {
10607                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10608                 x = -x;
10609         }
10610         pos |= x << CURSOR_X_SHIFT;
10611
10612         if (y < 0) {
10613                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10614                 y = -y;
10615         }
10616         pos |= y << CURSOR_Y_SHIFT;
10617
10618         return pos;
10619 }
10620
10621 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10622 {
10623         const struct drm_mode_config *config =
10624                 &plane_state->base.plane->dev->mode_config;
10625         int width = plane_state->base.crtc_w;
10626         int height = plane_state->base.crtc_h;
10627
10628         return width > 0 && width <= config->cursor_width &&
10629                 height > 0 && height <= config->cursor_height;
10630 }
10631
/*
 * Compute and validate the cursor plane's surface offset.
 *
 * Cursors cannot be panned within the fb, so any residual src_x/src_y
 * left over after offset alignment is rejected with -EINVAL.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
        int src_x, src_y;
        u32 offset;
        int ret;

        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
                return ret;

        /* Nothing to compute for an invisible plane. */
        if (!plane_state->base.visible)
                return 0;

        /* src coordinates are 16.16 fixed point; take the integer part. */
        src_x = plane_state->base.src_x >> 16;
        src_y = plane_state->base.src_y >> 16;

        intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
                                                    plane_state, 0);

        if (src_x != 0 || src_y != 0) {
                DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
                return -EINVAL;
        }

        plane_state->color_plane[0].offset = offset;

        return 0;
}
10661
10662 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10663                               struct intel_plane_state *plane_state)
10664 {
10665         const struct drm_framebuffer *fb = plane_state->base.fb;
10666         int ret;
10667
10668         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10669                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10670                 return -EINVAL;
10671         }
10672
10673         ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10674                                                   &crtc_state->base,
10675                                                   DRM_PLANE_HELPER_NO_SCALING,
10676                                                   DRM_PLANE_HELPER_NO_SCALING,
10677                                                   true, true);
10678         if (ret)
10679                 return ret;
10680
10681         ret = intel_cursor_check_surface(plane_state);
10682         if (ret)
10683                 return ret;
10684
10685         if (!plane_state->base.visible)
10686                 return 0;
10687
10688         ret = intel_plane_check_src_coordinates(plane_state);
10689         if (ret)
10690                 return ret;
10691
10692         return 0;
10693 }
10694
/*
 * 845g/865g support a maximum cursor stride of 2048, independent of
 * pixel format, modifier and rotation (parameters are unused).
 */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return 2048;
}
10702
10703 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10704 {
10705         u32 cntl = 0;
10706
10707         if (crtc_state->gamma_enable)
10708                 cntl |= CURSOR_GAMMA_ENABLE;
10709
10710         return cntl;
10711 }
10712
10713 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10714                            const struct intel_plane_state *plane_state)
10715 {
10716         return CURSOR_ENABLE |
10717                 CURSOR_FORMAT_ARGB |
10718                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10719 }
10720
10721 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10722 {
10723         int width = plane_state->base.crtc_w;
10724
10725         /*
10726          * 845g/865g are only limited by the width of their cursors,
10727          * the height is arbitrary up to the precision of the register.
10728          */
10729         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10730 }
10731
/*
 * 845g/865g-specific cursor plane checks: common checks plus the
 * platform's size and stride restrictions. On success also computes
 * the cursor control register value.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
                             struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        int ret;

        ret = intel_check_cursor(crtc_state, plane_state);
        if (ret)
                return ret;

        /* if we want to turn off the cursor ignore width and height */
        if (!fb)
                return 0;

        /* Check for which cursor types we support */
        if (!i845_cursor_size_ok(plane_state)) {
                DRM_DEBUG("Cursor dimension %dx%d not supported\n",
                          plane_state->base.crtc_w,
                          plane_state->base.crtc_h);
                return -EINVAL;
        }

        WARN_ON(plane_state->base.visible &&
                plane_state->color_plane[0].stride != fb->pitches[0]);

        /* Only power-of-two strides from 256 to 2048 are supported. */
        switch (fb->pitches[0]) {
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
                              fb->pitches[0]);
                return -EINVAL;
        }

        plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

        return 0;
}
10773
/*
 * Program (or disable, when @plane_state is NULL or not visible) the
 * single 845g/865g cursor. All register writes happen under the uncore
 * lock using the raw _FW accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        u32 cntl = 0, base = 0, pos = 0, size = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->base.visible) {
                unsigned int width = plane_state->base.crtc_w;
                unsigned int height = plane_state->base.crtc_h;

                /* Combine the precomputed plane bits with the crtc bits. */
                cntl = plane_state->ctl |
                        i845_cursor_ctl_crtc(crtc_state);

                /* CURSIZE packs height in bits 12+, width in the low bits. */
                size = (height << 12) | width;

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* On these chipsets we can only modify the base/size/stride
         * whilst the cursor is disabled.
         */
        if (plane->cursor.base != base ||
            plane->cursor.size != size ||
            plane->cursor.cntl != cntl) {
                /* Disable, reprogram everything, then re-enable. */
                I915_WRITE_FW(CURCNTR(PIPE_A), 0);
                I915_WRITE_FW(CURBASE(PIPE_A), base);
                I915_WRITE_FW(CURSIZE, size);
                I915_WRITE_FW(CURPOS(PIPE_A), pos);
                I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

                /* Cache programmed values so unchanged updates take the fast path. */
                plane->cursor.base = base;
                plane->cursor.size = size;
                plane->cursor.cntl = cntl;
        } else {
                /* Only the position changed; a single write suffices. */
                I915_WRITE_FW(CURPOS(PIPE_A), pos);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10818
/* Disable the 845g/865g cursor by updating with no plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i845_update_cursor(plane, crtc_state, NULL);
}
10824
10825 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10826                                      enum pipe *pipe)
10827 {
10828         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10829         enum intel_display_power_domain power_domain;
10830         intel_wakeref_t wakeref;
10831         bool ret;
10832
10833         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10834         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10835         if (!wakeref)
10836                 return false;
10837
10838         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10839
10840         *pipe = PIPE_A;
10841
10842         intel_display_power_put(dev_priv, power_domain, wakeref);
10843
10844         return ret;
10845 }
10846
/*
 * Maximum cursor stride on i9xx+: the widest supported cursor times
 * 4 bytes per pixel (ARGB); format/modifier/rotation are unused.
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return plane->base.dev->mode_config.cursor_width * 4;
}
10854
10855 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10856 {
10857         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10858         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10859         u32 cntl = 0;
10860
10861         if (INTEL_GEN(dev_priv) >= 11)
10862                 return cntl;
10863
10864         if (crtc_state->gamma_enable)
10865                 cntl = MCURSOR_GAMMA_ENABLE;
10866
10867         if (crtc_state->csc_enable)
10868                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10869
10870         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10871                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10872
10873         return cntl;
10874 }
10875
10876 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10877                            const struct intel_plane_state *plane_state)
10878 {
10879         struct drm_i915_private *dev_priv =
10880                 to_i915(plane_state->base.plane->dev);
10881         u32 cntl = 0;
10882
10883         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10884                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10885
10886         switch (plane_state->base.crtc_w) {
10887         case 64:
10888                 cntl |= MCURSOR_MODE_64_ARGB_AX;
10889                 break;
10890         case 128:
10891                 cntl |= MCURSOR_MODE_128_ARGB_AX;
10892                 break;
10893         case 256:
10894                 cntl |= MCURSOR_MODE_256_ARGB_AX;
10895                 break;
10896         default:
10897                 MISSING_CASE(plane_state->base.crtc_w);
10898                 return 0;
10899         }
10900
10901         if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10902                 cntl |= MCURSOR_ROTATE_180;
10903
10904         return cntl;
10905 }
10906
10907 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10908 {
10909         struct drm_i915_private *dev_priv =
10910                 to_i915(plane_state->base.plane->dev);
10911         int width = plane_state->base.crtc_w;
10912         int height = plane_state->base.crtc_h;
10913
10914         if (!intel_cursor_size_ok(plane_state))
10915                 return false;
10916
10917         /* Cursor width is limited to a few power-of-two sizes */
10918         switch (width) {
10919         case 256:
10920         case 128:
10921         case 64:
10922                 break;
10923         default:
10924                 return false;
10925         }
10926
10927         /*
10928          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10929          * height from 8 lines up to the cursor width, when the
10930          * cursor is not rotated. Everything else requires square
10931          * cursors.
10932          */
10933         if (HAS_CUR_FBC(dev_priv) &&
10934             plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10935                 if (height < 8 || height > width)
10936                         return false;
10937         } else {
10938                 if (height != width)
10939                         return false;
10940         }
10941
10942         return true;
10943 }
10944
/*
 * Validate cursor plane state for i9xx+ style cursors and precompute
 * plane_state->ctl. Returns 0 on success or a negative errno.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
                             struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        enum pipe pipe = plane->pipe;
        int ret;

        ret = intel_check_cursor(crtc_state, plane_state);
        if (ret)
                return ret;

        /* if we want to turn off the cursor ignore width and height */
        if (!fb)
                return 0;

        /* Check for which cursor types we support */
        if (!i9xx_cursor_size_ok(plane_state)) {
                DRM_DEBUG("Cursor dimension %dx%d not supported\n",
                          plane_state->base.crtc_w,
                          plane_state->base.crtc_h);
                return -EINVAL;
        }

        /* The precomputed stride must already agree with the fb pitch. */
        WARN_ON(plane_state->base.visible &&
                plane_state->color_plane[0].stride != fb->pitches[0]);

        /* Cursor fbs must be tightly packed: pitch == width * cpp. */
        if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
                DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
                              fb->pitches[0], plane_state->base.crtc_w);
                return -EINVAL;
        }

        /*
         * There's something wrong with the cursor on CHV pipe C.
         * If it straddles the left edge of the screen then
         * moving it away from the edge or disabling it often
         * results in a pipe underrun, and often that can lead to
         * dead pipe (constant underrun reported, and it scans
         * out just a solid color). To recover from that, the
         * display power well must be turned off and on again.
         * Refuse to put the cursor into that compromised position.
         */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
            plane_state->base.visible && plane_state->base.crtc_x < 0) {
                DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
                return -EINVAL;
        }

        plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

        return 0;
}
10999
/*
 * Program (or disable, when @plane_state is NULL or not visible) an
 * i9xx+ cursor plane. Register write ordering is critical here; see
 * the comment below.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
        u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->base.visible) {
                cntl = plane_state->ctl |
                        i9xx_cursor_ctl_crtc(crtc_state);

                /* Non-square cursors need CUR_FBC_CTL programmed with height-1. */
                if (plane_state->base.crtc_h != plane_state->base.crtc_w)
                        fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * On some platforms writing CURCNTR first will also
         * cause CURPOS to be armed by the CURBASE write.
         * Without the CURCNTR write the CURPOS write would
         * arm itself. Thus we always update CURCNTR before
         * CURPOS.
         *
         * On other platforms CURPOS always requires the
         * CURBASE write to arm the update. Additionally
         * a write to any of the cursor register will cancel
         * an already armed cursor update. Thus leaving out
         * the CURBASE write after CURPOS could lead to a
         * cursor that doesn't appear to move, or even change
         * shape. Thus we always write CURBASE.
         *
         * The other registers are armed by the CURBASE write
         * except when the plane is getting enabled at which time
         * the CURCNTR write arms the update.
         */

        if (INTEL_GEN(dev_priv) >= 9)
                skl_write_cursor_wm(plane, crtc_state);

        if (plane->cursor.base != base ||
            plane->cursor.size != fbc_ctl ||
            plane->cursor.cntl != cntl) {
                if (HAS_CUR_FBC(dev_priv))
                        I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
                I915_WRITE_FW(CURCNTR(pipe), cntl);
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);

                /* Cache programmed values for the fast path above. */
                plane->cursor.base = base;
                plane->cursor.size = fbc_ctl;
                plane->cursor.cntl = cntl;
        } else {
                /* Only the position changed; CURBASE still needed to arm it. */
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11064
/* Disable an i9xx+ cursor by updating with no plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i9xx_update_cursor(plane, crtc_state, NULL);
}
11070
/*
 * Read out whether the cursor plane is enabled, and which pipe it is
 * on, directly from the hardware. Returns false when the pipe's power
 * domain is off and the state can't be read.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
                                     enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        bool ret;
        u32 val;

        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-3 which don't have any
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        val = I915_READ(CURCNTR(plane->pipe));

        /* Any non-zero mode bits mean the cursor is enabled. */
        ret = val & MCURSOR_MODE;

        /* Pre-g4x parts report the owning pipe in the register itself. */
        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                *pipe = plane->pipe;
        else
                *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
                        MCURSOR_PIPE_SELECT_SHIFT;

        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
11104
/*
 * VESA 640x480x72Hz mode to set on the pipe; used as the fallback mode
 * by intel_get_load_detect_pipe() when the caller passes none.
 */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11110
11111 struct drm_framebuffer *
11112 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11113                          struct drm_mode_fb_cmd2 *mode_cmd)
11114 {
11115         struct intel_framebuffer *intel_fb;
11116         int ret;
11117
11118         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11119         if (!intel_fb)
11120                 return ERR_PTR(-ENOMEM);
11121
11122         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11123         if (ret)
11124                 goto err;
11125
11126         return &intel_fb->base;
11127
11128 err:
11129         kfree(intel_fb);
11130         return ERR_PTR(ret);
11131 }
11132
11133 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11134                                         struct drm_crtc *crtc)
11135 {
11136         struct drm_plane *plane;
11137         struct drm_plane_state *plane_state;
11138         int ret, i;
11139
11140         ret = drm_atomic_add_affected_planes(state, crtc);
11141         if (ret)
11142                 return ret;
11143
11144         for_each_new_plane_in_state(state, plane, plane_state, i) {
11145                 if (plane_state->crtc != crtc)
11146                         continue;
11147
11148                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11149                 if (ret)
11150                         return ret;
11151
11152                 drm_atomic_set_fb_for_plane(plane_state, NULL);
11153         }
11154
11155         return 0;
11156 }
11157
/*
 * Acquire a crtc (the connector's current one, or any idle one the
 * encoder can drive) and light it up with @mode (or the 640x480
 * load_detect_mode default) so load detection can be performed.
 *
 * NOTE(return semantics): despite the int return type this returns
 * true on success, false on failure, or -EDEADLK when the caller must
 * drop its locks and retry the acquire sequence.
 *
 * On success, the state needed to undo the modeset is stashed in
 * old->restore_state for intel_release_load_detect_pipe().
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               const struct drm_display_mode *mode,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_crtc *intel_crtc;
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(connector);
        struct drm_crtc *possible_crtc;
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = NULL;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret, i = -1;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, connector->name,
                      encoder->base.id, encoder->name);

        old->restore_state = NULL;

        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = connector->state->crtc;

                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_crtc(dev, possible_crtc) {
                i++;
                /* Skip crtcs this encoder cannot drive. */
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
                if (ret)
                        goto fail;

                if (possible_crtc->state->enable) {
                        drm_modeset_unlock(&possible_crtc->mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                DRM_DEBUG_KMS("no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        intel_crtc = to_intel_crtc(crtc);

        /* One state performs the modeset, the other undoes it later. */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->base.active = crtc_state->base.enable = true;

        if (!mode)
                mode = &load_detect_mode;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
        if (ret)
                goto fail;

        /* Load detection wants a bare pipe: shut off all planes on it. */
        ret = intel_modeset_disable_planes(state, crtc);
        if (ret)
                goto fail;

        /* Duplicate the affected state into restore_state for later undo. */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, crtc);
        if (ret) {
                DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                goto fail;
        }

        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* -EDEADLK must be propagated so the caller can back off and retry. */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
11315
11316 void intel_release_load_detect_pipe(struct drm_connector *connector,
11317                                     struct intel_load_detect_pipe *old,
11318                                     struct drm_modeset_acquire_ctx *ctx)
11319 {
11320         struct intel_encoder *intel_encoder =
11321                 intel_attached_encoder(connector);
11322         struct drm_encoder *encoder = &intel_encoder->base;
11323         struct drm_atomic_state *state = old->restore_state;
11324         int ret;
11325
11326         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11327                       connector->base.id, connector->name,
11328                       encoder->base.id, encoder->name);
11329
11330         if (!state)
11331                 return;
11332
11333         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11334         if (ret)
11335                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11336         drm_atomic_state_put(state);
11337 }
11338
11339 static int i9xx_pll_refclk(struct drm_device *dev,
11340                            const struct intel_crtc_state *pipe_config)
11341 {
11342         struct drm_i915_private *dev_priv = to_i915(dev);
11343         u32 dpll = pipe_config->dpll_hw_state.dpll;
11344
11345         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11346                 return dev_priv->vbt.lvds_ssc_freq;
11347         else if (HAS_PCH_SPLIT(dev_priv))
11348                 return 120000;
11349         else if (!IS_GEN(dev_priv, 2))
11350                 return 96000;
11351         else
11352                 return 48000;
11353 }
11354
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 * Decodes the DPLL/FP register state already captured in @pipe_config
 * into m/n/p divisors and stores the resulting port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* Pick whichever FP divisor register the DPLL is actually using. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                /* Pineview's N field is one-hot; decode via ffs(). */
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN(dev_priv, 2)) {
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* p2 depends on the DPLL operating mode. */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* gen2: treat as LVDS when on pipe B with the LVDS port enabled. */
                u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
11444
11445 int intel_dotclock_calculate(int link_freq,
11446                              const struct intel_link_m_n *m_n)
11447 {
11448         /*
11449          * The calculation for the data clock is:
11450          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11451          * But we want to avoid losing precison if possible, so:
11452          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11453          *
11454          * and the link clock is simpler:
11455          * link_clock = (m * link_clock) / n
11456          */
11457
11458         if (!m_n->link_n)
11459                 return 0;
11460
11461         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11462 }
11463
/*
 * Fill in port_clock (from the DPLL) and a derived crtc_clock (from
 * the FDI M/N values) for a PCH (ironlake-style) pipe.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* read out port_clock from the DPLL */
        i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * In case there is an active pipe without active ports,
         * we may need some idea for the dotclock anyway.
         * Calculate one based on the FDI configuration.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
                                         &pipe_config->fdi_m_n);
}
11481
11482 /* Returns the currently programmed mode of the given encoder. */
11483 struct drm_display_mode *
11484 intel_encoder_current_mode(struct intel_encoder *encoder)
11485 {
11486         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11487         struct intel_crtc_state *crtc_state;
11488         struct drm_display_mode *mode;
11489         struct intel_crtc *crtc;
11490         enum pipe pipe;
11491
11492         if (!encoder->get_hw_state(encoder, &pipe))
11493                 return NULL;
11494
11495         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11496
11497         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11498         if (!mode)
11499                 return NULL;
11500
11501         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11502         if (!crtc_state) {
11503                 kfree(mode);
11504                 return NULL;
11505         }
11506
11507         crtc_state->base.crtc = &crtc->base;
11508
11509         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11510                 kfree(crtc_state);
11511                 kfree(mode);
11512                 return NULL;
11513         }
11514
11515         encoder->get_config(encoder, crtc_state);
11516
11517         intel_mode_from_pipe_config(mode, crtc_state);
11518
11519         kfree(crtc_state);
11520
11521         return mode;
11522 }
11523
/* drm_crtc_funcs.destroy: clean up the drm core state and free the crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        drm_crtc_cleanup(crtc);
        kfree(intel_crtc);
}
11531
11532 /**
11533  * intel_wm_need_update - Check whether watermarks need updating
11534  * @cur: current plane state
11535  * @new: new plane state
11536  *
11537  * Check current plane state versus the new one to determine whether
11538  * watermarks need to be recalculated.
11539  *
11540  * Returns true or false.
11541  */
11542 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11543                                  struct intel_plane_state *new)
11544 {
11545         /* Update watermarks on tiling or size changes. */
11546         if (new->base.visible != cur->base.visible)
11547                 return true;
11548
11549         if (!cur->base.fb || !new->base.fb)
11550                 return false;
11551
11552         if (cur->base.fb->modifier != new->base.fb->modifier ||
11553             cur->base.rotation != new->base.rotation ||
11554             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11555             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11556             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11557             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11558                 return true;
11559
11560         return false;
11561 }
11562
11563 static bool needs_scaling(const struct intel_plane_state *state)
11564 {
11565         int src_w = drm_rect_width(&state->base.src) >> 16;
11566         int src_h = drm_rect_height(&state->base.src) >> 16;
11567         int dst_w = drm_rect_width(&state->base.dst);
11568         int dst_h = drm_rect_height(&state->base.dst);
11569
11570         return (src_w != dst_w || src_h != dst_h);
11571 }
11572
/*
 * intel_plane_atomic_calc_changes - derive crtc state bits for a plane update
 * @old_crtc_state: crtc state before this commit
 * @crtc_state: new crtc state being computed (updated in place)
 * @old_plane_state: plane state before this commit
 * @plane_state: new plane state
 *
 * Compares old vs. new plane state and sets the crtc_state flags
 * (watermark pre/post updates, cxsr/LP-watermark disabling, fb_bits,
 * active_planes) needed to commit this plane change safely.
 *
 * Returns 0 on success or a negative errno from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct intel_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct intel_plane_state *plane_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = old_crtc_state->base.active;
        bool is_crtc_enabled = crtc_state->base.active;
        bool turn_off, turn_on, visible, was_visible;
        int ret;

        /* gen9+ non-cursor planes may need a pipe scaler set up. */
        if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(crtc_state, plane_state);
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->base.visible;
        visible = plane_state->base.visible;

        /* A plane should never have been visible on an inactive crtc. */
        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                plane_state->base.visible = visible = false;
                crtc_state->active_planes &= ~BIT(plane->id);
                crtc_state->data_rate[plane->id] = 0;
        }

        /* Plane stays invisible: nothing to derive. */
        if (!was_visible && !visible)
                return 0;

        /* A full modeset counts as both a disable and a re-enable. */
        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                         crtc->base.base.id, crtc->base.name,
                         plane->base.base.id, plane->base.name,
                         was_visible, visible,
                         turn_off, turn_on, mode_changed);

        if (turn_on) {
                /* pre-gen5 (except g4x) recomputes watermarks before enabling. */
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (turn_off) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (intel_wm_need_update(old_plane_state, plane_state)) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        crtc_state->update_wm_pre = true;
                        crtc_state->update_wm_post = true;
                }
        }

        /* Track which frontbuffers were touched for flush/invalidate. */
        if (visible || was_visible)
                crtc_state->fb_bits |= plane->frontbuffer_bit;

        /*
         * ILK/SNB DVSACNTR/Sprite Enable
         * IVB SPR_CTL/Sprite Enable
         * "When in Self Refresh Big FIFO mode, a write to enable the
         *  plane will be internally buffered and delayed while Big FIFO
         *  mode is exiting."
         *
         * Which means that enabling the sprite can take an extra frame
         * when we start in big FIFO mode (LP1+). Thus we need to drop
         * down to LP0 and wait for vblank in order to make sure the
         * sprite gets enabled on the next vblank after the register write.
         * Doing otherwise would risk enabling the sprite one frame after
         * we've already signalled flip completion. We can resume LP1+
         * once the sprite has been enabled.
         *
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         * IVB SPR_SCALE/Scaling Enable
         * "Low Power watermarks must be disabled for at least one
         *  frame before enabling sprite scaling, and kept disabled
         *  until sprite scaling is disabled."
         *
         * ILK/SNB DVSASCALE/Scaling Enable
         * "When in Self Refresh Big FIFO mode, scaling enable will be
         *  masked off while Big FIFO mode is exiting."
         *
         * Despite the w/a only being listed for IVB we assume that
         * the ILK/SNB note has similar ramifications, hence we apply
         * the w/a on all three platforms.
         *
         * With experimental results seems this is needed also for primary
         * plane, not only sprite plane.
         */
        if (plane->id != PLANE_CURSOR &&
            (IS_GEN_RANGE(dev_priv, 5, 6) ||
             IS_IVYBRIDGE(dev_priv)) &&
            (turn_on || (!needs_scaling(old_plane_state) &&
                         needs_scaling(plane_state))))
                crtc_state->disable_lp_wm = true;

        return 0;
}
11694
11695 static bool encoders_cloneable(const struct intel_encoder *a,
11696                                const struct intel_encoder *b)
11697 {
11698         /* masks could be asymmetric, so check both ways */
11699         return a == b || (a->cloneable & (1 << b->type) &&
11700                           b->cloneable & (1 << a->type));
11701 }
11702
11703 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11704                                          struct intel_crtc *crtc,
11705                                          struct intel_encoder *encoder)
11706 {
11707         struct intel_encoder *source_encoder;
11708         struct drm_connector *connector;
11709         struct drm_connector_state *connector_state;
11710         int i;
11711
11712         for_each_new_connector_in_state(state, connector, connector_state, i) {
11713                 if (connector_state->crtc != &crtc->base)
11714                         continue;
11715
11716                 source_encoder =
11717                         to_intel_encoder(connector_state->best_encoder);
11718                 if (!encoders_cloneable(encoder, source_encoder))
11719                         return false;
11720         }
11721
11722         return true;
11723 }
11724
/*
 * Pull the planes linked to any planar-YUV plane in @state into the
 * atomic state, so a commit touching one half of a Y/UV plane pair
 * always updates both. Also sanity-check that the links are symmetric
 * and that exactly one side is the slave.
 *
 * Returns 0 on success or the errno from intel_atomic_get_plane_state().
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		/* Plane not part of a planar pair: nothing to add. */
		if (!linked)
			continue;

		/* Adds the linked plane to the state if not already there. */
		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		/* The link must point back, and master/slave roles must differ. */
		WARN_ON(linked_plane_state->planar_linked_plane != plane);
		WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}
11747
/*
 * On gen11+ planar (NV12-style) formats need a second plane to scan out
 * the Y component. Tear down any stale plane links for this crtc, then,
 * for every plane in @crtc_state that needs one (nv12_planes bitmask),
 * grab a free Y-capable plane and link the pair.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or the
 * errno from intel_atomic_get_plane_state().
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->base.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No planar formats in use: done after the cleanup above. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find the first Y-capable plane that is not already active. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		/* Link the pair in both directions and activate the Y plane. */
		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
	}

	return 0;
}
11819
11820 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11821 {
11822         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
11823         struct intel_atomic_state *state =
11824                 to_intel_atomic_state(new_crtc_state->base.state);
11825         const struct intel_crtc_state *old_crtc_state =
11826                 intel_atomic_get_old_crtc_state(state, crtc);
11827
11828         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11829 }
11830
/*
 * drm_crtc_helper_funcs .atomic_check hook: validate and fill in the
 * derived parts of the new crtc state (clocks, color management,
 * watermarks, scalers, IPS). Returns 0 on success or a negative errno.
 */
static int intel_crtc_atomic_check(struct drm_crtc *_crtc,
				   struct drm_crtc_state *_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(_crtc_state);
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* pre-gen5 (except g4x): disabling the pipe needs a post-wm update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->base.active)
		crtc_state->update_wm_post = true;

	/* Compute new clocks only on a modeset; a dpll must not be set yet. */
	if (mode_changed && crtc_state->base.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->base.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->base.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate wm needs the target wm computed first. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* gen9+: scaler and planar-YUV plane setup, first failure wins. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe)
			ret = skl_update_scaler_crtc(crtc_state);

		if (!ret)
			ret = icl_check_nv12_planes(crtc_state);
		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(crtc, crtc_state);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, crtc,
							 crtc_state);
	}

	if (HAS_IPS(dev_priv))
		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);

	return ret;
}
11910
/* CRTC helper vtable; only the atomic_check hook is provided here. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_check = intel_crtc_atomic_check,
};
11914
/*
 * Sync every connector's atomic state (crtc, best_encoder) with the
 * legacy connector->encoder pointers, fixing up connector references
 * along the way.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound to a crtc again: take a fresh reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
11939
11940 static int
11941 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11942                       struct intel_crtc_state *pipe_config)
11943 {
11944         struct drm_connector *connector = conn_state->connector;
11945         const struct drm_display_info *info = &connector->display_info;
11946         int bpp;
11947
11948         switch (conn_state->max_bpc) {
11949         case 6 ... 7:
11950                 bpp = 6 * 3;
11951                 break;
11952         case 8 ... 9:
11953                 bpp = 8 * 3;
11954                 break;
11955         case 10 ... 11:
11956                 bpp = 10 * 3;
11957                 break;
11958         case 12:
11959                 bpp = 12 * 3;
11960                 break;
11961         default:
11962                 return -EINVAL;
11963         }
11964
11965         if (bpp < pipe_config->pipe_bpp) {
11966                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11967                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11968                               connector->base.id, connector->name,
11969                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11970                               pipe_config->pipe_bpp);
11971
11972                 pipe_config->pipe_bpp = bpp;
11973         }
11974
11975         return 0;
11976 }
11977
11978 static int
11979 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11980                           struct intel_crtc_state *pipe_config)
11981 {
11982         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11983         struct drm_atomic_state *state = pipe_config->base.state;
11984         struct drm_connector *connector;
11985         struct drm_connector_state *connector_state;
11986         int bpp, i;
11987
11988         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11989             IS_CHERRYVIEW(dev_priv)))
11990                 bpp = 10*3;
11991         else if (INTEL_GEN(dev_priv) >= 5)
11992                 bpp = 12*3;
11993         else
11994                 bpp = 8*3;
11995
11996         pipe_config->pipe_bpp = bpp;
11997
11998         /* Clamp display bpp to connector max bpp */
11999         for_each_new_connector_in_state(state, connector, connector_state, i) {
12000                 int ret;
12001
12002                 if (connector_state->crtc != &crtc->base)
12003                         continue;
12004
12005                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12006                 if (ret)
12007                         return ret;
12008         }
12009
12010         return 0;
12011 }
12012
/* Dump the hw-facing crtc_* timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
12024
/* Dump a link M/N configuration (labelled by @id) to the KMS debug log. */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
12035
12036 static void
12037 intel_dump_infoframe(struct drm_i915_private *dev_priv,
12038                      const union hdmi_infoframe *frame)
12039 {
12040         if ((drm_debug & DRM_UT_KMS) == 0)
12041                 return;
12042
12043         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
12044 }
12045
/* Expands INTEL_OUTPUT_FOO into the designated initializer [FOO] = "FOO". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Names indexed by output type bit position; used by snprintf_output_types(). */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12064
12065 static void snprintf_output_types(char *buf, size_t len,
12066                                   unsigned int output_types)
12067 {
12068         char *str = buf;
12069         int i;
12070
12071         str[0] = '\0';
12072
12073         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12074                 int r;
12075
12076                 if ((output_types & BIT(i)) == 0)
12077                         continue;
12078
12079                 r = snprintf(str, len, "%s%s",
12080                              str != buf ? "," : "", output_type_str[i]);
12081                 if (r >= len)
12082                         break;
12083                 str += r;
12084                 len -= r;
12085
12086                 output_types &= ~BIT(i);
12087         }
12088
12089         WARN_ON_ONCE(output_types != 0);
12090 }
12091
/* Names indexed by enum intel_output_format; see output_formats(). */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12098
12099 static const char *output_formats(enum intel_output_format format)
12100 {
12101         if (format >= ARRAY_SIZE(output_format_str))
12102                 format = INTEL_OUTPUT_FORMAT_INVALID;
12103         return output_format_str[format];
12104 }
12105
/* Dump one plane's fb, format, rotation, scaler and src/dst rects to the log. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_format_name_buf format_name;

	/* No fb attached: nothing but visibility to report. */
	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->base.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->base.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->base.rotation, plane_state->scaler_id);
	/* src/dst rects are only meaningful for visible planes. */
	if (plane_state->base.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->base.src),
			      DRM_RECT_ARG(&plane_state->base.dst));
}
12131
/*
 * Dump a full crtc configuration (output types, timings, m/n values,
 * infoframes, pfit, dpll state, color state) plus the state of every
 * plane on the crtc, to the KMS debug log. @context labels the dump;
 * @state may be NULL, in which case planes are skipped.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->base.enable), context);

	/* Disabled crtc: only the plane states are worth dumping. */
	if (!pipe_config->base.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->base.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump each infoframe type only if its enable bit is set. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* Panel fitter state differs between GMCH and PCH platforms. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	if (IS_CHERRYVIEW(dev_priv))
		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->cgm_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			      pipe_config->csc_mode, pipe_config->gamma_mode,
			      pipe_config->gamma_enable, pipe_config->csc_enable);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
12242
12243 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
12244 {
12245         struct drm_device *dev = state->base.dev;
12246         struct drm_connector *connector;
12247         struct drm_connector_list_iter conn_iter;
12248         unsigned int used_ports = 0;
12249         unsigned int used_mst_ports = 0;
12250         bool ret = true;
12251
12252         /*
12253          * Walk the connector list instead of the encoder
12254          * list to detect the problem on ddi platforms
12255          * where there's just one encoder per digital port.
12256          */
12257         drm_connector_list_iter_begin(dev, &conn_iter);
12258         drm_for_each_connector_iter(connector, &conn_iter) {
12259                 struct drm_connector_state *connector_state;
12260                 struct intel_encoder *encoder;
12261
12262                 connector_state =
12263                         drm_atomic_get_new_connector_state(&state->base,
12264                                                            connector);
12265                 if (!connector_state)
12266                         connector_state = connector->state;
12267
12268                 if (!connector_state->best_encoder)
12269                         continue;
12270
12271                 encoder = to_intel_encoder(connector_state->best_encoder);
12272
12273                 WARN_ON(!connector_state->crtc);
12274
12275                 switch (encoder->type) {
12276                         unsigned int port_mask;
12277                 case INTEL_OUTPUT_DDI:
12278                         if (WARN_ON(!HAS_DDI(to_i915(dev))))
12279                                 break;
12280                         /* else, fall through */
12281                 case INTEL_OUTPUT_DP:
12282                 case INTEL_OUTPUT_HDMI:
12283                 case INTEL_OUTPUT_EDP:
12284                         port_mask = 1 << encoder->port;
12285
12286                         /* the same port mustn't appear more than once */
12287                         if (used_ports & port_mask)
12288                                 ret = false;
12289
12290                         used_ports |= port_mask;
12291                         break;
12292                 case INTEL_OUTPUT_DP_MST:
12293                         used_mst_ports |=
12294                                 1 << encoder->port;
12295                         break;
12296                 default:
12297                         break;
12298                 }
12299         }
12300         drm_connector_list_iter_end(&conn_iter);
12301
12302         /* can't mix MST and SST/HDMI on the same port */
12303         if (used_ports & used_mst_ports)
12304                 return false;
12305
12306         return ret;
12307 }
12308
static int
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_state *saved_state;

	/*
	 * Reset our extended (intel_crtc_state) fields to zero before they
	 * are recomputed, while preserving the base drm_crtc_state and a
	 * whitelist of fields that must survive. Done via a scratch
	 * zeroed copy so the preserved fields can be staged first.
	 *
	 * Returns 0 on success, -ENOMEM if the scratch allocation fails.
	 */
	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Stage the fields that must survive the wipe. */
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* GMCH platforms compute watermarks up front; keep them too. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	/* The copy below assumes base is the first member (offset 0). */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memcpy(&crtc_state->base + 1, &saved_state->base + 1,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	kfree(saved_state);
	return 0;
}
12343
/*
 * Compute the full pipe configuration for @pipe_config's CRTC: clear
 * stale derived state, sanitize sync polarity flags, determine the pipe
 * bpp and source size, then give every encoder on the CRTC and the CRTC
 * itself a chance to adjust (or reject) the config, retrying once if
 * the CRTC compute step requests it.
 *
 * Returns 0 on success, -EDEADLK on lock contention (the caller must
 * back off and retry the whole transaction), or another negative errno
 * on failure.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;	/* allow exactly one RETRY round trip */

	ret = clear_intel_crtc_state(pipe_config);
	if (ret)
		return ret;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the starting bpp so the final debug print can show it. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is expected backoff, not worth logging. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	/* CRTC lowered the bpp/clock; rerun the encoder hooks exactly once. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
12480
/*
 * Compare two clock values with ~5% relative tolerance.
 * Returns true when the clocks are considered equal; a zero clock only
 * matches another zero clock exactly.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* Accept when the delta is below ~5% of the average clock. */
	return (delta + sum) * 100 / sum < 105;
}
12498
12499 static bool
12500 intel_compare_m_n(unsigned int m, unsigned int n,
12501                   unsigned int m2, unsigned int n2,
12502                   bool exact)
12503 {
12504         if (m == m2 && n == n2)
12505                 return true;
12506
12507         if (exact || !m || !n || !m2 || !n2)
12508                 return false;
12509
12510         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12511
12512         if (n > n2) {
12513                 while (n > n2) {
12514                         m2 <<= 1;
12515                         n2 <<= 1;
12516                 }
12517         } else if (n < n2) {
12518                 while (n < n2) {
12519                         m <<= 1;
12520                         n <<= 1;
12521                 }
12522         }
12523
12524         if (n != n2)
12525                 return false;
12526
12527         return intel_fuzzy_clock_check(m, m2);
12528 }
12529
12530 static bool
12531 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12532                        const struct intel_link_m_n *m2_n2,
12533                        bool exact)
12534 {
12535         return m_n->tu == m2_n2->tu &&
12536                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12537                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12538                 intel_compare_m_n(m_n->link_m, m_n->link_n,
12539                                   m2_n2->link_m, m2_n2->link_n, exact);
12540 }
12541
12542 static bool
12543 intel_compare_infoframe(const union hdmi_infoframe *a,
12544                         const union hdmi_infoframe *b)
12545 {
12546         return memcmp(a, b, sizeof(*a)) == 0;
12547 }
12548
/*
 * Report an infoframe mismatch between expected (@a) and found (@b)
 * state. During a fastset this is only a debug message (gated on
 * drm.debug including KMS); otherwise it is a driver error.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		/* Skip the (verbose) infoframe dump if KMS debug is off. */
		if ((drm_debug & DRM_UT_KMS) == 0)
			return;

		drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
		drm_dbg(DRM_UT_KMS, "expected:");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg(DRM_UT_KMS, "found");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err("mismatch in %s infoframe", name);
		drm_err("expected:");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err("found");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
12572
/*
 * printf-style helper used by the PIPE_CONF_CHECK_* macros to report a
 * pipe config mismatch in field @name. Logged as KMS debug during a
 * fastset, as an error otherwise.
 */
static void __printf(3, 4)
pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	/* %pV expands the caller's format/args in a single log line. */
	if (fastset)
		drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
	else
		drm_err("mismatch in %s %pV", name, &vaf);

	va_end(args);
}
12590
12591 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12592 {
12593         if (i915_modparams.fastboot != -1)
12594                 return i915_modparams.fastboot;
12595
12596         /* Enable fastboot by default on Skylake and newer */
12597         if (INTEL_GEN(dev_priv) >= 9)
12598                 return true;
12599
12600         /* Enable fastboot by default on VLV and CHV */
12601         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12602                 return true;
12603
12604         /* Disabled by default on all others */
12605         return false;
12606 }
12607
/*
 * Compare the computed software pipe config (@current_config) against
 * the state read back from hardware (@pipe_config). Returns true when
 * they match. With @fastset, checks that don't require a full modeset
 * are relaxed and mismatches are logged as debug instead of errors.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
	bool ret = true;
	u32 bp_gamma = 0;
	/* True when fastsetting a state inherited from BIOS/GOP takeover. */
	bool fixup_inherited = fastset &&
		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
		ret = false;
	}

/* Compare a field, reporting mismatches in hex. */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)\n", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a field, reporting mismatches in decimal. */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected %i, found %i)\n", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a boolean field, reporting mismatches as yes/no. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected %s, found %s)\n", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* Compare a pointer field (e.g. the shared DPLL). */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected %p, found %p)\n", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a link m/n set, fuzzily unless this is a full (!fastset) check. */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    !fastset)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)\n", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)\n", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.gmch_m, \
				     current_config->alt_name.gmch_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* Compare only the bits of a field selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(%x) (expected %i, found %i)\n", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Compare a clock field with ~5% tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected %i, found %i)\n", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Byte-compare one of the cached HDMI infoframes. */
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/* Compare a LUT: mode field first, then the table at @bit_precision. */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, __stringify(name1), \
				"(expected %i, found %i, won't compare lut values)\n", \
				current_config->name1, \
				pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					pipe_config->name2, pipe_config->name1, \
					bit_precision)) { \
			pipe_config_mismatch(fastset, __stringify(name2), \
					"hw_state doesn't match sw_state\n"); \
			ret = false; \
		} \
	} \
} while (0)

/* True if either config carries the given readout quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	/* Pre-BDW has separate M/N and M2/N2 register sets; BDW+ has one. */
	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	if (!fastset) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);

		/* Zero precision means no LUT readout; skip the compare. */
		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
		if (bp_gamma)
			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma);

	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	PIPE_CONF_CHECK_X(infoframes.enable);
	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

	return ret;
}
12953
12954 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12955                                            const struct intel_crtc_state *pipe_config)
12956 {
12957         if (pipe_config->has_pch_encoder) {
12958                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12959                                                             &pipe_config->fdi_m_n);
12960                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12961
12962                 /*
12963                  * FDI already provided one idea for the dotclock.
12964                  * Yell if the encoder disagrees.
12965                  */
12966                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12967                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12968                      fdi_dotclock, dotclock);
12969         }
12970 }
12971
/*
 * Cross-check the software watermark/DDB state for @crtc against what the
 * hardware actually has programmed (gen9+ only).  Any mismatch is reported
 * via DRM_ERROR but does not abort the commit.
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Scratch buffer for the hardware readout; heap-allocated because
         * the combined wm/ddb state is too large for the stack. */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_ddb_allocation ddb;
                struct skl_pipe_wm wm;
        } *hw;
        struct skl_ddb_allocation *sw_ddb;
        struct skl_pipe_wm *sw_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        const enum pipe pipe = crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        /* SKL-style watermarks only exist on gen9+; inactive pipes have
         * nothing programmed worth checking. */
        if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
                return;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return;

        /* Read back the pipe watermarks the hardware is using. */
        skl_pipe_wm_get_hw_state(crtc, &hw->wm);
        sw_wm = &new_crtc_state->wm.skl.optimal;

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        skl_ddb_get_hw_state(dev_priv, &hw->ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;

        /* DBUF slice config only exists on gen11+. */
        if (INTEL_GEN(dev_priv) >= 11 &&
            hw->ddb.enabled_slices != sw_ddb->enabled_slices)
                DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
                          sw_ddb->enabled_slices,
                          hw->ddb.enabled_slices);

        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks: every level from 0 to max_level inclusive. */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1, level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                /* Transition watermark is checked separately from the levels. */
                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB: only the Y/primary allocation is compared here. */
                hw_ddb_entry = &hw->ddb_y[plane];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated it's ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check
         */
        /* NOTE(review): the comment above describes a conditional skip, but
         * the check below runs unconditionally (if (1)) — confirm whether the
         * cursor-inactive skip was intentionally removed. */
        if (1) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe),
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe),
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
13110
/*
 * Verify every connector in @state that is (or is being) assigned to @crtc.
 * Called with a NULL @crtc from intel_modeset_verify_disabled() to check the
 * connectors that end up with no crtc.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
                       struct intel_crtc *crtc)
{
        struct drm_connector *connector;
        struct drm_connector_state *new_conn_state;
        int i;

        for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
                struct drm_encoder *encoder = connector->encoder;
                struct intel_crtc_state *crtc_state = NULL;

                /* Skip connectors bound to some other crtc.
                 * NOTE(review): when crtc is NULL this relies on &crtc->base
                 * evaluating to NULL (base presumably at offset 0) — confirm. */
                if (new_conn_state->crtc != &crtc->base)
                        continue;

                if (crtc)
                        crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

                intel_connector_verify_state(crtc_state, new_conn_state);

                /* The atomic best_encoder must agree with the legacy pointer. */
                I915_STATE_WARN(new_conn_state->best_encoder != encoder,
                     "connector's atomic encoder doesn't match legacy encoder\n");
        }
}
13135
/*
 * For every encoder on the device, verify that its software enabled state
 * matches both the connectors in @state and the hardware readout.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                /* found: some connector in @state referenced this encoder
                 * (old or new); enabled: a *new* connector state uses it. */
                bool enabled = false, found = false;
                enum pipe pipe;

                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
                              encoder->base.base.id,
                              encoder->base.name);

                for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                /* Encoders untouched by this atomic state can't be checked. */
                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                /* A detached encoder must also be off in hardware. */
                if (!encoder->base.crtc) {
                        bool active;

                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
13184
/*
 * Read the pipe config back from the hardware and compare it against the
 * software state in @new_crtc_state.
 *
 * NOTE: @old_crtc_state is destroyed here and its storage reused as scratch
 * space for the hardware readout — it must not be used by the caller
 * afterwards.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
                  struct intel_crtc_state *old_crtc_state,
                  struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config;
        struct drm_atomic_state *state;
        bool active;

        /* Repurpose old_crtc_state's memory for the hw readout, keeping
         * only the back-pointers to the crtc and the atomic state. */
        state = old_crtc_state->base.state;
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
        pipe_config = old_crtc_state;
        memset(pipe_config, 0, sizeof(*pipe_config));
        pipe_config->base.crtc = &crtc->base;
        pipe_config->base.state = state;

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

        active = dev_priv->display.get_pipe_config(crtc, pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                active = new_crtc_state->base.active;

        I915_STATE_WARN(new_crtc_state->base.active != active,
             "crtc active state doesn't match with hw state "
             "(expected %i, found %i)\n", new_crtc_state->base.active, active);

        I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
             "transitional active state does not match atomic hw state "
             "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);

        /* Every encoder on the crtc must agree on active state and pipe,
         * and active encoders contribute their config to the readout. */
        for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
                enum pipe pipe;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->base.active,
                        "[ENCODER:%i] active %i with crtc active %i\n",
                        encoder->base.base.id, active, new_crtc_state->base.active);

                I915_STATE_WARN(active && crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                if (active)
                        encoder->get_config(encoder, pipe_config);
        }

        /* Derived state the compare below depends on. */
        intel_crtc_compute_pixel_rate(pipe_config);

        /* Inactive crtcs have nothing further to compare. */
        if (!new_crtc_state->base.active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        /* Full (non-fuzzy) comparison of sw state vs. hw readout. */
        if (!intel_pipe_config_compare(new_crtc_state,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
        }
}
13250
13251 static void
13252 intel_verify_planes(struct intel_atomic_state *state)
13253 {
13254         struct intel_plane *plane;
13255         const struct intel_plane_state *plane_state;
13256         int i;
13257
13258         for_each_new_intel_plane_in_state(state, plane,
13259                                           plane_state, i)
13260                 assert_plane(plane, plane_state->planar_slave ||
13261                              plane_state->base.visible);
13262 }
13263
/*
 * Verify one shared DPLL's software tracking against its hardware state.
 * With @crtc == NULL (disabled-state check) only the pll-internal
 * consistency is verified; otherwise the pll's active/enabled masks are
 * also checked against @crtc and @new_crtc_state.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct intel_crtc *crtc,
                         struct intel_crtc_state *new_crtc_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        unsigned int crtc_mask;
        bool active;

        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        DRM_DEBUG_KMS("%s\n", pll->info->name);

        active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on PLLs have no meaningful on/off tracking to verify. */
        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active crtc\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        if (!crtc) {
                /* No crtc: just check active users are a subset of refs. */
                I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
                                "more active pll users than references: %x vs %x\n",
                                pll->active_mask, pll->state.crtc_mask);

                return;
        }

        crtc_mask = drm_crtc_mask(&crtc->base);

        /* The crtc's presence in active_mask must track its active state. */
        if (new_crtc_state->base.active)
                I915_STATE_WARN(!(pll->active_mask & crtc_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & crtc_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

        /* Active or not, the crtc must hold a reference on the pll. */
        I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
                        crtc_mask, pll->state.crtc_mask);

        /* If the pll is on, the cached hw state must match the readout. */
        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
13318
13319 static void
13320 verify_shared_dpll_state(struct intel_crtc *crtc,
13321                          struct intel_crtc_state *old_crtc_state,
13322                          struct intel_crtc_state *new_crtc_state)
13323 {
13324         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13325
13326         if (new_crtc_state->shared_dpll)
13327                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13328
13329         if (old_crtc_state->shared_dpll &&
13330             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13331                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13332                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13333
13334                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13335                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13336                                 pipe_name(drm_crtc_index(&crtc->base)));
13337                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13338                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13339                                 pipe_name(drm_crtc_index(&crtc->base)));
13340         }
13341 }
13342
13343 static void
13344 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13345                           struct intel_atomic_state *state,
13346                           struct intel_crtc_state *old_crtc_state,
13347                           struct intel_crtc_state *new_crtc_state)
13348 {
13349         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13350                 return;
13351
13352         verify_wm_state(crtc, new_crtc_state);
13353         verify_connector_state(state, crtc);
13354         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13355         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13356 }
13357
13358 static void
13359 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13360 {
13361         int i;
13362
13363         for (i = 0; i < dev_priv->num_shared_dpll; i++)
13364                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13365 }
13366
/*
 * Sanity check the state that isn't tied to a specific crtc: all encoders,
 * connectors that end up with no crtc, and every shared DPLL.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
                              struct intel_atomic_state *state)
{
        verify_encoder_state(dev_priv, state);
        verify_connector_state(state, NULL);
        verify_disabled_dpll_state(dev_priv);
}
13375
13376 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
13377 {
13378         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13379         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13380
13381         /*
13382          * The scanline counter increments at the leading edge of hsync.
13383          *
13384          * On most platforms it starts counting from vtotal-1 on the
13385          * first active line. That means the scanline counter value is
13386          * always one less than what we would expect. Ie. just after
13387          * start of vblank, which also occurs at start of hsync (on the
13388          * last active line), the scanline counter will read vblank_start-1.
13389          *
13390          * On gen2 the scanline counter starts counting from 1 instead
13391          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13392          * to keep the value positive), instead of adding one.
13393          *
13394          * On HSW+ the behaviour of the scanline counter depends on the output
13395          * type. For DP ports it behaves like most other platforms, but on HDMI
13396          * there's an extra 1 line difference. So we need to add two instead of
13397          * one to the value.
13398          *
13399          * On VLV/CHV DSI the scanline counter would appear to increment
13400          * approx. 1/3 of a scanline before start of vblank. Unfortunately
13401          * that means we can't tell whether we're in vblank or not while
13402          * we're on that particular line. We must still set scanline_offset
13403          * to 1 so that the vblank timestamps come out correct when we query
13404          * the scanline counter from within the vblank interrupt handler.
13405          * However if queried just before the start of vblank we'll get an
13406          * answer that's slightly in the future.
13407          */
13408         if (IS_GEN(dev_priv, 2)) {
13409                 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
13410                 int vtotal;
13411
13412                 vtotal = adjusted_mode->crtc_vtotal;
13413                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13414                         vtotal /= 2;
13415
13416                 crtc->scanline_offset = vtotal - 1;
13417         } else if (HAS_DDI(dev_priv) &&
13418                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13419                 crtc->scanline_offset = 2;
13420         } else
13421                 crtc->scanline_offset = 1;
13422 }
13423
13424 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13425 {
13426         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13427         struct intel_crtc_state *new_crtc_state;
13428         struct intel_crtc *crtc;
13429         int i;
13430
13431         if (!dev_priv->display.crtc_compute_clock)
13432                 return;
13433
13434         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13435                 if (!needs_modeset(new_crtc_state))
13436                         continue;
13437
13438                 intel_release_shared_dplls(state, crtc);
13439         }
13440 }
13441
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
{
        struct intel_crtc_state *crtc_state;
        struct intel_crtc *crtc;
        struct intel_crtc_state *first_crtc_state = NULL;
        struct intel_crtc_state *other_crtc_state = NULL;
        enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
        int i;

        /* look at all crtc's that are going to be enabled in during modeset */
        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
                if (!crtc_state->base.active ||
                    !needs_modeset(crtc_state))
                        continue;

                /* Remember the first two crtcs being enabled; more than two
                 * doesn't change which one needs the workaround pipe. */
                if (first_crtc_state) {
                        other_crtc_state = crtc_state;
                        break;
                } else {
                        first_crtc_state = crtc_state;
                        first_pipe = crtc->pipe;
                }
        }

        /* No workaround needed? */
        if (!first_crtc_state)
                return 0;

        /* w/a possibly needed, check how many crtc's are already enabled. */
        for_each_intel_crtc(state->base.dev, crtc) {
                /* Pull every crtc into the state so hsw_workaround_pipe can
                 * be reset on all of them; may return -EDEADLK etc. */
                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);

                crtc_state->hsw_workaround_pipe = INVALID_PIPE;

                /* Only count crtcs that stay enabled (no modeset). */
                if (!crtc_state->base.active ||
                    needs_modeset(crtc_state))
                        continue;

                /* 2 or more enabled crtcs means no need for w/a */
                if (enabled_pipe != INVALID_PIPE)
                        return 0;

                enabled_pipe = crtc->pipe;
        }

        /* Exactly one pipe stays enabled: the first newly-enabled crtc must
         * wait on it.  Otherwise, if two crtcs are being enabled together,
         * the second waits on the first. */
        if (enabled_pipe != INVALID_PIPE)
                first_crtc_state->hsw_workaround_pipe = enabled_pipe;
        else if (other_crtc_state)
                other_crtc_state->hsw_workaround_pipe = first_pipe;

        return 0;
}
13502
/*
 * Global (non-per-crtc) checks and bookkeeping for an atomic state that
 * contains at least one full modeset: digital port conflicts, active-pipe
 * tracking, cdclk recomputation and shared DPLL release.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_crtc *crtc;
        int ret, i;

        if (!check_digital_port_conflicts(state)) {
                DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
                return -EINVAL;
        }

        /* keep the current setting */
        if (!state->cdclk.force_min_cdclk_changed)
                state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

        /* Seed the state-local copies from the current device state. */
        state->modeset = true;
        state->active_pipes = dev_priv->active_pipes;
        state->cdclk.logical = dev_priv->cdclk.logical;
        state->cdclk.actual = dev_priv->cdclk.actual;
        state->cdclk.pipe = INVALID_PIPE;

        /* Fold this state's activations/deactivations into active_pipes and
         * record which pipes change active state. */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (new_crtc_state->base.active)
                        state->active_pipes |= BIT(crtc->pipe);
                else
                        state->active_pipes &= ~BIT(crtc->pipe);

                if (old_crtc_state->base.active != new_crtc_state->base.active)
                        state->active_pipe_changes |= BIT(crtc->pipe);
        }

        /* cdclk depends on the final set of active pipes computed above. */
        ret = intel_modeset_calc_cdclk(state);
        if (ret)
                return ret;

        intel_modeset_clear_plls(state);

        if (IS_HASWELL(dev_priv))
                return haswell_mode_set_planes_workaround(state);

        return 0;
}
13547
13548 /*
13549  * Handle calculation of various watermark data at the end of the atomic check
13550  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13551  * handlers to ensure that all derived state has been updated.
13552  */
13553 static int calc_watermark_data(struct intel_atomic_state *state)
13554 {
13555         struct drm_device *dev = state->base.dev;
13556         struct drm_i915_private *dev_priv = to_i915(dev);
13557
13558         /* Is there platform-specific watermark information to calculate? */
13559         if (dev_priv->display.compute_global_watermarks)
13560                 return dev_priv->display.compute_global_watermarks(state);
13561
13562         return 0;
13563 }
13564
13565 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
13566                                      struct intel_crtc_state *new_crtc_state)
13567 {
13568         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
13569                 return;
13570
13571         new_crtc_state->base.mode_changed = false;
13572         new_crtc_state->update_pipe = true;
13573
13574         /*
13575          * If we're not doing the full modeset we want to
13576          * keep the current M/N values as they may be
13577          * sufficiently different to the computed values
13578          * to cause problems.
13579          *
13580          * FIXME: should really copy more fuzzy state here
13581          */
13582         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
13583         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
13584         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
13585         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
13586 }
13587
13588 /**
13589  * intel_atomic_check - validate state object
13590  * @dev: drm device
13591  * @_state: state to validate
      *
      * i915's drm_mode_config_funcs.atomic_check hook. Runs the generic DRM
      * modeset check, computes the full pipe config for every crtc that
      * needs a modeset, validates DP MST state, cdclk, linked planes,
      * planes, watermarks and bandwidth.
      *
      * Returns 0 on success, -EDEADLK if the caller must back off and
      * retry the locking, or another negative error code on failure.
13592  */
13593 static int intel_atomic_check(struct drm_device *dev,
13594                               struct drm_atomic_state *_state)
13595 {
13596         struct drm_i915_private *dev_priv = to_i915(dev);
13597         struct intel_atomic_state *state = to_intel_atomic_state(_state);
13598         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13599         struct intel_crtc *crtc;
13600         int ret, i;
13601         bool any_ms = state->cdclk.force_min_cdclk_changed;
13602
13603         /* Catch I915_MODE_FLAG_INHERITED */
13604         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13605                                             new_crtc_state, i) {
13606                 if (new_crtc_state->base.mode.private_flags !=
13607                     old_crtc_state->base.mode.private_flags)
13608                         new_crtc_state->base.mode_changed = true;
13609         }
13610
13611         ret = drm_atomic_helper_check_modeset(dev, &state->base);
13612         if (ret)
13613                 goto fail;
13614
              /*
               * Compute the new pipe config for each crtc undergoing a full
               * modeset; a successful fastset downgrade clears the modeset
               * flag again, hence the re-check of needs_modeset() below.
               */
13615         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13616                                             new_crtc_state, i) {
13617                 if (!needs_modeset(new_crtc_state))
13618                         continue;
13619
13620                 if (!new_crtc_state->base.enable) {
13621                         any_ms = true;
13622                         continue;
13623                 }
13624
13625                 ret = intel_modeset_pipe_config(new_crtc_state);
13626                 if (ret)
13627                         goto fail;
13628
13629                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
13630
13631                 if (needs_modeset(new_crtc_state))
13632                         any_ms = true;
13633         }
13634
13635         ret = drm_dp_mst_atomic_check(&state->base);
13636         if (ret)
13637                 goto fail;
13638
13639         if (any_ms) {
13640                 ret = intel_modeset_checks(state);
13641                 if (ret)
13642                         goto fail;
13643         } else {
                      /* No modeset anywhere: keep the current logical cdclk. */
13644                 state->cdclk.logical = dev_priv->cdclk.logical;
13645         }
13646
13647         ret = icl_add_linked_planes(state);
13648         if (ret)
13649                 goto fail;
13650
13651         ret = drm_atomic_helper_check_planes(dev, &state->base);
13652         if (ret)
13653                 goto fail;
13654
13655         intel_fbc_choose_crtc(dev_priv, state);
13656         ret = calc_watermark_data(state);
13657         if (ret)
13658                 goto fail;
13659
13660         ret = intel_bw_atomic_check(state);
13661         if (ret)
13662                 goto fail;
13663
              /* Debug dump of every crtc that will be touched by the commit. */
13664         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13665                                             new_crtc_state, i) {
13666                 if (!needs_modeset(new_crtc_state) &&
13667                     !new_crtc_state->update_pipe)
13668                         continue;
13669
13670                 intel_dump_pipe_config(new_crtc_state, state,
13671                                        needs_modeset(new_crtc_state) ?
13672                                        "[modeset]" : "[fastset]");
13673         }
13674
13675         return 0;
13676
13677  fail:
              /* -EDEADLK is a normal lock-backoff, not a failure: don't dump. */
13678         if (ret == -EDEADLK)
13679                 return ret;
13680
13681         /*
13682          * FIXME would probably be nice to know which crtc specifically
13683          * caused the failure, in cases where we can pinpoint it.
13684          */
13685         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13686                                             new_crtc_state, i)
13687                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
13688
13689         return ret;
13690 }
13691
13692 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
13693 {
13694         return drm_atomic_helper_prepare_planes(state->base.dev,
13695                                                 &state->base);
13696 }
13697
13698 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13699 {
13700         struct drm_device *dev = crtc->base.dev;
13701         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13702
13703         if (!vblank->max_vblank_count)
13704                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13705
13706         return crtc->base.funcs->get_vblank_counter(&crtc->base);
13707 }
13708
      /*
       * Apply the new state for a single crtc: either a full enable after a
       * modeset, or a pre-plane update on an already-running pipe, followed
       * by FBC bookkeeping and the plane commit itself. Statement order
       * mirrors the required hardware programming sequence.
       */
13709 static void intel_update_crtc(struct intel_crtc *crtc,
13710                               struct intel_atomic_state *state,
13711                               struct intel_crtc_state *old_crtc_state,
13712                               struct intel_crtc_state *new_crtc_state)
13713 {
13714         struct drm_device *dev = state->base.dev;
13715         struct drm_i915_private *dev_priv = to_i915(dev);
13716         bool modeset = needs_modeset(new_crtc_state);
              /* Primary-plane state, if the primary plane is in this commit. */
13717         struct intel_plane_state *new_plane_state =
13718                 intel_atomic_get_new_plane_state(state,
13719                                                  to_intel_plane(crtc->base.primary));
13720
13721         if (modeset) {
13722                 update_scanline_offset(new_crtc_state);
13723                 dev_priv->display.crtc_enable(new_crtc_state, state);
13724
13725                 /* vblanks work again, re-enable pipe CRC. */
13726                 intel_crtc_enable_pipe_crc(crtc);
13727         } else {
13728                 intel_pre_plane_update(old_crtc_state, new_crtc_state);
13729
13730                 if (new_crtc_state->update_pipe)
13731                         intel_encoders_update_pipe(crtc, new_crtc_state, state);
13732         }
13733
              /* Keep FBC in sync with whether the new state allows it. */
13734         if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
13735                 intel_fbc_disable(crtc);
13736         else if (new_plane_state)
13737                 intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
13738
13739         intel_begin_crtc_commit(state, crtc);
13740
              /* Gen9+ uses the skl plane-update path, older gens the i9xx one. */
13741         if (INTEL_GEN(dev_priv) >= 9)
13742                 skl_update_planes_on_crtc(state, crtc);
13743         else
13744                 i9xx_update_planes_on_crtc(state, crtc);
13745
13746         intel_finish_crtc_commit(state, crtc);
13747 }
13748
      /*
       * Fully disable a crtc that was active in the old state: planes off,
       * pipe CRC off, pipe off, FBC off, shared DPLL released. The ordering
       * here is deliberate (see the pipe CRC comment below).
       */
13749 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
13750                                           struct intel_crtc_state *old_crtc_state,
13751                                           struct intel_crtc_state *new_crtc_state,
13752                                           struct intel_crtc *crtc)
13753 {
13754         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13755
13756         intel_crtc_disable_planes(state, crtc);
13757
13758         /*
13759          * We need to disable pipe CRC before disabling the pipe,
13760          * or we race against vblank off.
13761          */
13762         intel_crtc_disable_pipe_crc(crtc);
13763
13764         dev_priv->display.crtc_disable(old_crtc_state, state);
13765         crtc->active = false;
13766         intel_fbc_disable(crtc);
13767         intel_disable_shared_dpll(old_crtc_state);
13768
13769         /*
13770          * Underruns don't always raise interrupts,
13771          * so check manually.
13772          */
13773         intel_check_cpu_fifo_underruns(dev_priv);
13774         intel_check_pch_fifo_underruns(dev_priv);
13775
13776         /* FIXME unify this for all platforms */
              /* Program watermarks for the now-off pipe on non-GMCH platforms. */
13777         if (!new_crtc_state->base.active &&
13778             !HAS_GMCH(dev_priv) &&
13779             dev_priv->display.initial_watermarks)
13780                 dev_priv->display.initial_watermarks(state,
13781                                                      new_crtc_state);
13782 }
13783
      /*
       * Disable every crtc in @state that requires a full modeset, in
       * reverse pipe order (see below for why the order matters).
       */
13784 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
13785 {
13786         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
13787         struct intel_crtc *crtc;
13788         int i;
13789
13790         /*
13791          * Disable CRTC/pipes in reverse order because some features(MST in
13792          * TGL+) requires master and slave relationship between pipes, so it
13793          * should always pick the lowest pipe as master as it will be enabled
13794          * first and disable in the reverse order so the master will be the
13795          * last one to be disabled.
13796          */
13797         for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
13798                                                     new_crtc_state, i) {
13799                 if (!needs_modeset(new_crtc_state))
13800                         continue;
13801
                      /* Pre-plane work runs even for crtcs that were inactive. */
13802                 intel_pre_plane_update(old_crtc_state, new_crtc_state);
13803
13804                 if (old_crtc_state->base.active)
13805                         intel_old_crtc_state_disables(state,
13806                                                       old_crtc_state,
13807                                                       new_crtc_state,
13808                                                       crtc);
13809         }
13810 }
13811
13812 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
13813 {
13814         struct intel_crtc *crtc;
13815         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13816         int i;
13817
13818         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13819                 if (!new_crtc_state->base.active)
13820                         continue;
13821
13822                 intel_update_crtc(crtc, state, old_crtc_state,
13823                                   new_crtc_state);
13824         }
13825 }
13826
      /*
       * skl+ variant of commit_modeset_enables: a crtc is only updated once
       * its new DDB allocation no longer overlaps any other pipe's current
       * allocation, iterating until all active pipes have been committed.
       * Also toggles the second DBuf slice on gen11+ as needed.
       */
13827 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
13828 {
13829         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13830         struct intel_crtc *crtc;
13831         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
              /* Bitmask of crtcs already committed in the loop below. */
13832         unsigned int updated = 0;
13833         bool progress;
13834         enum pipe pipe;
13835         int i;
13836         u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13837         u8 required_slices = state->wm_results.ddb.enabled_slices;
              /* Per-pipe DDB entries currently claimed in hardware. */
13838         struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
13839
13840         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
13841                 /* ignore allocations for crtc's that have been turned off. */
13842                 if (new_crtc_state->base.active)
13843                         entries[i] = old_crtc_state->wm.skl.ddb;
13844
13845         /* If 2nd DBuf slice required, enable it here */
13846         if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13847                 icl_dbuf_slices_update(dev_priv, required_slices);
13848
13849         /*
13850          * Whenever the number of active pipes changes, we need to make sure we
13851          * update the pipes in the right order so that their ddb allocations
13852          * never overlap with eachother inbetween CRTC updates. Otherwise we'll
13853          * cause pipe underruns and other bad stuff.
13854          */
13855         do {
13856                 progress = false;
13857
13858                 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13859                         bool vbl_wait = false;
13860                         unsigned int cmask = drm_crtc_mask(&crtc->base);
13861
13862                         pipe = crtc->pipe;
13863
                              /* Skip crtcs already done or not active. */
13864                         if (updated & cmask || !new_crtc_state->base.active)
13865                                 continue;
13866
                              /* Defer this crtc while its new DDB still overlaps others. */
13867                         if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
13868                                                         entries,
13869                                                         INTEL_NUM_PIPES(dev_priv), i))
13870                                 continue;
13871
13872                         updated |= cmask;
13873                         entries[i] = new_crtc_state->wm.skl.ddb;
13874
13875                         /*
13876                          * If this is an already active pipe, it's DDB changed,
13877                          * and this isn't the last pipe that needs updating
13878                          * then we need to wait for a vblank to pass for the
13879                          * new ddb allocation to take effect.
13880                          */
13881                         if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
13882                                                  &old_crtc_state->wm.skl.ddb) &&
13883                             !new_crtc_state->base.active_changed &&
13884                             state->wm_results.dirty_pipes != updated)
13885                                 vbl_wait = true;
13886
13887                         intel_update_crtc(crtc, state, old_crtc_state,
13888                                           new_crtc_state);
13889
13890                         if (vbl_wait)
13891                                 intel_wait_for_vblank(dev_priv, pipe);
13892
13893                         progress = true;
13894                 }
13895         } while (progress);
13896
13897         /* If 2nd DBuf slice is no more required disable it */
13898         if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13899                 icl_dbuf_slices_update(dev_priv, required_slices);
13900 }
13901
13902 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13903 {
13904         struct intel_atomic_state *state, *next;
13905         struct llist_node *freed;
13906
13907         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13908         llist_for_each_entry_safe(state, next, freed, freed)
13909                 drm_atomic_state_put(&state->base);
13910 }
13911
13912 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13913 {
13914         struct drm_i915_private *dev_priv =
13915                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13916
13917         intel_atomic_helper_free_state(dev_priv);
13918 }
13919
      /*
       * Block until either the commit's readiness fence has signalled, or a
       * modeset-affecting GPU reset has been flagged (presumably so the
       * commit does not stall behind a reset — NOTE(review): confirm intent).
       * Waits on both queues simultaneously using hand-rolled wait entries.
       */
13920 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13921 {
13922         struct wait_queue_entry wait_fence, wait_reset;
13923         struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13924
13925         init_wait_entry(&wait_fence, 0);
13926         init_wait_entry(&wait_reset, 0);
13927         for (;;) {
                      /* Arm both waiters before testing either condition. */
13928                 prepare_to_wait(&intel_state->commit_ready.wait,
13929                                 &wait_fence, TASK_UNINTERRUPTIBLE);
13930                 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
13931                                               I915_RESET_MODESET),
13932                                 &wait_reset, TASK_UNINTERRUPTIBLE);
13933
13934
13935                 if (i915_sw_fence_done(&intel_state->commit_ready) ||
13936                     test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
13937                         break;
13938
13939                 schedule();
13940         }
13941         finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13942         finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
13943                                   I915_RESET_MODESET),
13944                     &wait_reset);
13945 }
13946
13947 static void intel_atomic_cleanup_work(struct work_struct *work)
13948 {
13949         struct drm_atomic_state *state =
13950                 container_of(work, struct drm_atomic_state, commit_work);
13951         struct drm_i915_private *i915 = to_i915(state->dev);
13952
13953         drm_atomic_helper_cleanup_planes(&i915->drm, state);
13954         drm_atomic_helper_commit_cleanup_done(state);
13955         drm_atomic_state_put(state);
13956
13957         intel_atomic_helper_free_state(i915);
13958 }
13959
      /*
       * Apply an already-validated and swapped atomic state to the hardware:
       * wait for dependencies, disable outgoing pipes, reprogram cdclk and
       * encoders, enable incoming pipes, wait for flips, then defer final
       * cleanup to a worker. Statement order follows the required hardware
       * programming sequence — do not reorder casually.
       */
13960 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
13961 {
13962         struct drm_device *dev = state->base.dev;
13963         struct drm_i915_private *dev_priv = to_i915(dev);
13964         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
13965         struct intel_crtc *crtc;
              /* Power-domain references taken per pipe, released near the end. */
13966         u64 put_domains[I915_MAX_PIPES] = {};
13967         intel_wakeref_t wakeref = 0;
13968         int i;
13969
13970         intel_atomic_commit_fence_wait(state);
13971
13972         drm_atomic_helper_wait_for_dependencies(&state->base);
13973
13974         if (state->modeset)
13975                 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13976
13977         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13978                                             new_crtc_state, i) {
13979                 if (needs_modeset(new_crtc_state) ||
13980                     new_crtc_state->update_pipe) {
13981
13982                         put_domains[crtc->pipe] =
13983                                 modeset_get_crtc_power_domains(new_crtc_state);
13984                 }
13985         }
13986
13987         intel_commit_modeset_disables(state);
13988
13989         /* FIXME: Eventually get rid of our crtc->config pointer */
13990         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
13991                 crtc->config = new_crtc_state;
13992
13993         if (state->modeset) {
13994                 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
13995
13996                 intel_set_cdclk_pre_plane_update(dev_priv,
13997                                                  &state->cdclk.actual,
13998                                                  &dev_priv->cdclk.actual,
13999                                                  state->cdclk.pipe);
14000
14001                 /*
14002                  * SKL workaround: bspec recommends we disable the SAGV when we
14003                  * have more then one pipe enabled
14004                  */
14005                 if (!intel_can_enable_sagv(state))
14006                         intel_disable_sagv(dev_priv);
14007
14008                 intel_modeset_verify_disabled(dev_priv, state);
14009         }
14010
14011         /* Complete the events for pipes that have now been disabled */
14012         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14013                 bool modeset = needs_modeset(new_crtc_state);
14014
14015                 /* Complete events for now disable pipes here. */
14016                 if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
14017                         spin_lock_irq(&dev->event_lock);
14018                         drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
14019                         spin_unlock_irq(&dev->event_lock);
14020
14021                         new_crtc_state->base.event = NULL;
14022                 }
14023         }
14024
14025         if (state->modeset)
14026                 intel_encoders_update_prepare(state);
14027
14028         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
14029         dev_priv->display.commit_modeset_enables(state);
14030
14031         if (state->modeset) {
14032                 intel_encoders_update_complete(state);
14033
14034                 intel_set_cdclk_post_plane_update(dev_priv,
14035                                                   &state->cdclk.actual,
14036                                                   &dev_priv->cdclk.actual,
14037                                                   state->cdclk.pipe);
14038         }
14039
14040         /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
14041          * already, but still need the state for the delayed optimization. To
14042          * fix this:
14043          * - wrap the optimization/post_plane_update stuff into a per-crtc work.
14044          * - schedule that vblank worker _before_ calling hw_done
14045          * - at the start of commit_tail, cancel it _synchrously
14046          * - switch over to the vblank wait helper in the core after that since
14047          *   we don't need out special handling any more.
14048          */
14049         drm_atomic_helper_wait_for_flip_done(dev, &state->base);
14050
              /* Reload LUTs on fastsets/color changes once the flip has completed. */
14051         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14052                 if (new_crtc_state->base.active &&
14053                     !needs_modeset(new_crtc_state) &&
14054                     (new_crtc_state->base.color_mgmt_changed ||
14055                      new_crtc_state->update_pipe))
14056                         intel_color_load_luts(new_crtc_state);
14057         }
14058
14059         /*
14060          * Now that the vblank has passed, we can go ahead and program the
14061          * optimal watermarks on platforms that need two-step watermark
14062          * programming.
14063          *
14064          * TODO: Move this (and other cleanup) to an async worker eventually.
14065          */
14066         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14067                 if (dev_priv->display.optimize_watermarks)
14068                         dev_priv->display.optimize_watermarks(state,
14069                                                               new_crtc_state);
14070         }
14071
14072         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14073                 intel_post_plane_update(old_crtc_state);
14074
14075                 if (put_domains[i])
14076                         modeset_put_power_domains(dev_priv, put_domains[i]);
14077
14078                 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
14079         }
14080
14081         if (state->modeset)
14082                 intel_verify_planes(state);
14083
14084         if (state->modeset && intel_can_enable_sagv(state))
14085                 intel_enable_sagv(dev_priv);
14086
14087         drm_atomic_helper_commit_hw_done(&state->base);
14088
14089         if (state->modeset) {
14090                 /* As one of the primary mmio accessors, KMS has a high
14091                  * likelihood of triggering bugs in unclaimed access. After we
14092                  * finish modesetting, see if an error has been flagged, and if
14093                  * so enable debugging for the next modeset - and hope we catch
14094                  * the culprit.
14095                  */
14096                 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
14097                 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
14098         }
              /* Release the runtime-pm ref taken in intel_atomic_commit(). */
14099         intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14100
14101         /*
14102          * Defer the cleanup of the old state to a separate worker to not
14103          * impede the current task (userspace for blocking modesets) that
14104          * are executed inline. For out-of-line asynchronous modesets/flips,
14105          * deferring to a new worker seems overkill, but we would place a
14106          * schedule point (cond_resched()) here anyway to keep latencies
14107          * down.
14108          */
14109         INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
14110         queue_work(system_highpri_wq, &state->base.commit_work);
14111 }
14112
14113 static void intel_atomic_commit_work(struct work_struct *work)
14114 {
14115         struct intel_atomic_state *state =
14116                 container_of(work, struct intel_atomic_state, base.commit_work);
14117
14118         intel_atomic_commit_tail(state);
14119 }
14120
      /*
       * i915_sw_fence notify callback for the commit-ready fence. On
       * FENCE_FREE, the state is pushed onto a lock-less free list and the
       * free worker scheduled, since we may be in a context where we cannot
       * drop the reference directly (NOTE(review): inferred from the
       * deferral — confirm against i915_sw_fence docs).
       */
14121 static int __i915_sw_fence_call
14122 intel_atomic_commit_ready(struct i915_sw_fence *fence,
14123                           enum i915_sw_fence_notify notify)
14124 {
14125         struct intel_atomic_state *state =
14126                 container_of(fence, struct intel_atomic_state, commit_ready);
14127
14128         switch (notify) {
14129         case FENCE_COMPLETE:
14130                 /* we do blocking waits in the worker, nothing to do here */
14131                 break;
14132         case FENCE_FREE:
14133                 {
14134                         struct intel_atomic_helper *helper =
14135                                 &to_i915(state->base.dev)->atomic_helper;
14136
                              /* llist_add returns true when the list was empty,
                               * i.e. the worker is not already pending for it. */
14137                         if (llist_add(&state->freed, &helper->free_list))
14138                                 schedule_work(&helper->free_work);
14139                         break;
14140                 }
14141         }
14142
14143         return NOTIFY_DONE;
14144 }
14145
14146 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
14147 {
14148         struct intel_plane_state *old_plane_state, *new_plane_state;
14149         struct intel_plane *plane;
14150         int i;
14151
14152         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
14153                                              new_plane_state, i)
14154                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
14155                                         to_intel_frontbuffer(new_plane_state->base.fb),
14156                                         plane->frontbuffer_bit);
14157 }
14158
      /*
       * i915's drm_mode_config_funcs.atomic_commit hook. Prepares and swaps
       * the state, then either runs the commit tail inline (blocking) or
       * queues it on the modeset/flip workqueue (nonblocking). Note the
       * careful unwind on the two error paths: the sw fence must be
       * committed and the runtime-pm ref dropped before returning.
       *
       * Returns 0 on success or a negative error code.
       */
14159 static int intel_atomic_commit(struct drm_device *dev,
14160                                struct drm_atomic_state *_state,
14161                                bool nonblock)
14162 {
14163         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14164         struct drm_i915_private *dev_priv = to_i915(dev);
14165         int ret = 0;
14166
              /* Held until the commit tail completes; released there. */
14167         state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
14168
14169         drm_atomic_state_get(&state->base);
14170         i915_sw_fence_init(&state->commit_ready,
14171                            intel_atomic_commit_ready);
14172
14173         /*
14174          * The intel_legacy_cursor_update() fast path takes care
14175          * of avoiding the vblank waits for simple cursor
14176          * movement and flips. For cursor on/off and size changes,
14177          * we want to perform the vblank waits so that watermark
14178          * updates happen during the correct frames. Gen9+ have
14179          * double buffered watermarks and so shouldn't need this.
14180          *
14181          * Unset state->legacy_cursor_update before the call to
14182          * drm_atomic_helper_setup_commit() because otherwise
14183          * drm_atomic_helper_wait_for_flip_done() is a noop and
14184          * we get FIFO underruns because we didn't wait
14185          * for vblank.
14186          *
14187          * FIXME doing watermarks and fb cleanup from a vblank worker
14188          * (assuming we had any) would solve these problems.
14189          */
14190         if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
14191                 struct intel_crtc_state *new_crtc_state;
14192                 struct intel_crtc *crtc;
14193                 int i;
14194
14195                 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
14196                         if (new_crtc_state->wm.need_postvbl_update ||
14197                             new_crtc_state->update_wm_post)
14198                                 state->base.legacy_cursor_update = false;
14199         }
14200
14201         ret = intel_atomic_prepare_commit(state);
14202         if (ret) {
14203                 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
14204                 i915_sw_fence_commit(&state->commit_ready);
14205                 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14206                 return ret;
14207         }
14208
14209         ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
14210         if (!ret)
14211                 ret = drm_atomic_helper_swap_state(&state->base, true);
14212
14213         if (ret) {
14214                 i915_sw_fence_commit(&state->commit_ready);
14215
14216                 drm_atomic_helper_cleanup_planes(dev, &state->base);
14217                 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14218                 return ret;
14219         }
              /* State is swapped from here on: commit cannot fail anymore. */
14220         dev_priv->wm.distrust_bios_wm = false;
14221         intel_shared_dpll_swap_state(state);
14222         intel_atomic_track_fbs(state);
14223
14224         if (state->modeset) {
14225                 memcpy(dev_priv->min_cdclk, state->min_cdclk,
14226                        sizeof(state->min_cdclk));
14227                 memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
14228                        sizeof(state->min_voltage_level));
14229                 dev_priv->active_pipes = state->active_pipes;
14230                 dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
14231
14232                 intel_cdclk_swap_state(state);
14233         }
14234
              /* Extra ref for the commit tail; dropped in the cleanup worker. */
14235         drm_atomic_state_get(&state->base);
14236         INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
14237
14238         i915_sw_fence_commit(&state->commit_ready);
14239         if (nonblock && state->modeset) {
14240                 queue_work(dev_priv->modeset_wq, &state->base.commit_work);
14241         } else if (nonblock) {
14242                 queue_work(dev_priv->flip_wq, &state->base.commit_work);
14243         } else {
                      /* Blocking modeset: let pending nonblocking modesets finish first. */
14244                 if (state->modeset)
14245                         flush_workqueue(dev_priv->modeset_wq);
14246                 intel_atomic_commit_tail(state);
14247         }
14248
14249         return 0;
14250 }
14251
      /* Context for a one-shot vblank-waitqueue callback that boosts RPS
       * if the scanout request hasn't started by the next vblank. */
14252 struct wait_rps_boost {
              /* Entry on the crtc's vblank waitqueue; func = do_rps_boost. */
14253         struct wait_queue_entry wait;
14254
              /* Crtc whose vblank we wait on; holds a vblank reference. */
14255         struct drm_crtc *crtc;
              /* Request to (maybe) boost; holds a request reference. */
14256         struct i915_request *request;
14257 };
14258
14259 static int do_rps_boost(struct wait_queue_entry *_wait,
14260                         unsigned mode, int sync, void *key)
14261 {
14262         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
14263         struct i915_request *rq = wait->request;
14264
14265         /*
14266          * If we missed the vblank, but the request is already running it
14267          * is reasonable to assume that it will complete before the next
14268          * vblank without our intervention, so leave RPS alone.
14269          */
14270         if (!i915_request_started(rq))
14271                 gen6_rps_boost(rq);
14272         i915_request_put(rq);
14273
14274         drm_crtc_vblank_put(wait->crtc);
14275
14276         list_del(&wait->wait.entry);
14277         kfree(wait);
14278         return 1;
14279 }
14280
      /*
       * Arm a one-shot RPS boost that fires from @crtc's vblank waitqueue
       * if @fence (an i915 request) hasn't started by then. Takes a vblank
       * reference and a request reference; both are released either here on
       * the error paths or in do_rps_boost(). Best-effort: silently bails
       * on allocation failure.
       */
14281 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
14282                                        struct dma_fence *fence)
14283 {
14284         struct wait_rps_boost *wait;
14285
              /* Only i915-backed fences can be mapped back to a request. */
14286         if (!dma_fence_is_i915(fence))
14287                 return;
14288
              /* RPS boosting only exists on gen6+. */
14289         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
14290                 return;
14291
14292         if (drm_crtc_vblank_get(crtc))
14293                 return;
14294
14295         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
14296         if (!wait) {
                      /* Balance the vblank_get above before bailing. */
14297                 drm_crtc_vblank_put(crtc);
14298                 return;
14299         }
14300
14301         wait->request = to_request(dma_fence_get(fence));
14302         wait->crtc = crtc;
14303
14304         wait->wait.func = do_rps_boost;
14305         wait->wait.flags = 0;
14306
14307         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
14308 }
14309
      /*
       * Pin @plane_state's framebuffer for scanout and record the resulting
       * vma in the plane state. Cursor planes on platforms that need a
       * physical cursor address additionally get the object attached to
       * physically contiguous memory first.
       *
       * Returns 0 on success or a negative error code.
       */
14310 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
14311 {
14312         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
14313         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14314         struct drm_framebuffer *fb = plane_state->base.fb;
14315         struct i915_vma *vma;
14316
14317         if (plane->id == PLANE_CURSOR &&
14318             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
14319                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14320                 const int align = intel_cursor_alignment(dev_priv);
14321                 int err;
14322
14323                 err = i915_gem_object_attach_phys(obj, align);
14324                 if (err)
14325                         return err;
14326         }
14327
14328         vma = intel_pin_and_fence_fb_obj(fb,
14329                                          &plane_state->view,
14330                                          intel_plane_uses_fence(plane_state),
14331                                          &plane_state->flags);
14332         if (IS_ERR(vma))
14333                 return PTR_ERR(vma);
14334
              /* Ownership of the pinned vma moves to the plane state;
               * released later via intel_plane_unpin_fb(). */
14335         plane_state->vma = vma;
14336
14337         return 0;
14338 }
14339
14340 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
14341 {
14342         struct i915_vma *vma;
14343
14344         vma = fetch_and_zero(&old_plane_state->vma);
14345         if (vma)
14346                 intel_unpin_fb_vma(vma, old_plane_state->flags);
14347 }
14348
/*
 * Bump any rendering still pending on @obj to display priority, so the
 * GPU work the scanout depends on isn't starved by other clients.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
14357
14358 /**
14359  * intel_prepare_plane_fb - Prepare fb for usage on plane
14360  * @plane: drm plane to prepare for
14361  * @new_state: the plane state being prepared
14362  *
14363  * Prepares a framebuffer for usage on a display plane.  Generally this
14364  * involves pinning the underlying object and updating the frontbuffer tracking
14365  * bits.  Some older platforms need special physical address handling for
14366  * cursor planes.
14367  *
14368  * Must be called with struct_mutex held.
14369  *
14370  * Returns 0 on success, negative error code on failure.
14371  */
14372 int
14373 intel_prepare_plane_fb(struct drm_plane *plane,
14374                        struct drm_plane_state *new_state)
14375 {
14376         struct intel_atomic_state *intel_state =
14377                 to_intel_atomic_state(new_state->state);
14378         struct drm_i915_private *dev_priv = to_i915(plane->dev);
14379         struct drm_framebuffer *fb = new_state->fb;
14380         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14381         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
14382         int ret;
14383
14384         if (old_obj) {
14385                 struct intel_crtc_state *crtc_state =
14386                         intel_atomic_get_new_crtc_state(intel_state,
14387                                                         to_intel_crtc(plane->state->crtc));
14388
14389                 /* Big Hammer, we also need to ensure that any pending
14390                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14391                  * current scanout is retired before unpinning the old
14392                  * framebuffer. Note that we rely on userspace rendering
14393                  * into the buffer attached to the pipe they are waiting
14394                  * on. If not, userspace generates a GPU hang with IPEHR
14395                  * point to the MI_WAIT_FOR_EVENT.
14396                  *
14397                  * This should only fail upon a hung GPU, in which case we
14398                  * can safely continue.
14399                  */
14400                 if (needs_modeset(crtc_state)) {
14401                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14402                                                               old_obj->base.resv, NULL,
14403                                                               false, 0,
14404                                                               GFP_KERNEL);
14405                         if (ret < 0)
14406                                 return ret;
14407                 }
14408         }
14409
14410         if (new_state->fence) { /* explicit fencing */
14411                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
14412                                                     new_state->fence,
14413                                                     I915_FENCE_TIMEOUT,
14414                                                     GFP_KERNEL);
14415                 if (ret < 0)
14416                         return ret;
14417         }
14418
14419         if (!obj)
14420                 return 0;
14421
14422         ret = i915_gem_object_pin_pages(obj);
14423         if (ret)
14424                 return ret;
14425
14426         ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14427         if (ret) {
14428                 i915_gem_object_unpin_pages(obj);
14429                 return ret;
14430         }
14431
14432         ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
14433
14434         mutex_unlock(&dev_priv->drm.struct_mutex);
14435         i915_gem_object_unpin_pages(obj);
14436         if (ret)
14437                 return ret;
14438
14439         fb_obj_bump_render_priority(obj);
14440         intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
14441
14442         if (!new_state->fence) { /* implicit fencing */
14443                 struct dma_fence *fence;
14444
14445                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14446                                                       obj->base.resv, NULL,
14447                                                       false, I915_FENCE_TIMEOUT,
14448                                                       GFP_KERNEL);
14449                 if (ret < 0)
14450                         return ret;
14451
14452                 fence = dma_resv_get_excl_rcu(obj->base.resv);
14453                 if (fence) {
14454                         add_rps_boost_after_vblank(new_state->crtc, fence);
14455                         dma_fence_put(fence);
14456                 }
14457         } else {
14458                 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
14459         }
14460
14461         /*
14462          * We declare pageflips to be interactive and so merit a small bias
14463          * towards upclocking to deliver the frame on time. By only changing
14464          * the RPS thresholds to sample more regularly and aim for higher
14465          * clocks we can hopefully deliver low power workloads (like kodi)
14466          * that are not quite steady state without resorting to forcing
14467          * maximum clocks following a vblank miss (see do_rps_boost()).
14468          */
14469         if (!intel_state->rps_interactive) {
14470                 intel_rps_mark_interactive(dev_priv, true);
14471                 intel_state->rps_interactive = true;
14472         }
14473
14474         return 0;
14475 }
14476
14477 /**
14478  * intel_cleanup_plane_fb - Cleans up an fb after plane use
14479  * @plane: drm plane to clean up for
14480  * @old_state: the state from the previous modeset
14481  *
14482  * Cleans up a framebuffer that has just been removed from a plane.
14483  *
14484  * Must be called with struct_mutex held.
14485  */
14486 void
14487 intel_cleanup_plane_fb(struct drm_plane *plane,
14488                        struct drm_plane_state *old_state)
14489 {
14490         struct intel_atomic_state *intel_state =
14491                 to_intel_atomic_state(old_state->state);
14492         struct drm_i915_private *dev_priv = to_i915(plane->dev);
14493
14494         if (intel_state->rps_interactive) {
14495                 intel_rps_mark_interactive(dev_priv, false);
14496                 intel_state->rps_interactive = false;
14497         }
14498
14499         /* Should only be called after a successful intel_prepare_plane_fb()! */
14500         mutex_lock(&dev_priv->drm.struct_mutex);
14501         intel_plane_unpin_fb(to_intel_plane_state(old_state));
14502         mutex_unlock(&dev_priv->drm.struct_mutex);
14503 }
14504
14505 int
14506 skl_max_scale(const struct intel_crtc_state *crtc_state,
14507               const struct drm_format_info *format)
14508 {
14509         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14510         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14511         int max_scale;
14512         int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14513
14514         if (!crtc_state->base.enable)
14515                 return DRM_PLANE_HELPER_NO_SCALING;
14516
14517         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14518         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14519
14520         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14521                 max_dotclk *= 2;
14522
14523         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14524                 return DRM_PLANE_HELPER_NO_SCALING;
14525
14526         /*
14527          * skl max scale is lower of:
14528          *    close to 3 but not 3, -1 is for that purpose
14529          *            or
14530          *    cdclk/crtc_clock
14531          */
14532         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
14533             !drm_format_info_is_yuv_semiplanar(format))
14534                 tmpclk1 = 0x30000 - 1;
14535         else
14536                 tmpclk1 = 0x20000 - 1;
14537         tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14538         max_scale = min(tmpclk1, tmpclk2);
14539
14540         return max_scale;
14541 }
14542
/*
 * Begin the per-crtc plane commit: open the vblank evasion critical
 * section and program the pipe-level state (color management, pipe
 * config, scalers) that must land together with the plane updates.
 */
static void intel_begin_crtc_commit(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	/* On a full modeset skip the fastset-only programming below. */
	if (modeset)
		goto out;

	if (new_crtc_state->base.color_mgmt_changed ||
	    new_crtc_state->update_pipe)
		intel_color_commit(new_crtc_state);

	if (new_crtc_state->update_pipe)
		intel_update_pipe_config(old_crtc_state, new_crtc_state);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

out:
	/* Watermarks are updated for both modesets and fastsets. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state,
							   new_crtc_state);
}
14576
14577 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14578                                   struct intel_crtc_state *crtc_state)
14579 {
14580         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14581
14582         if (!IS_GEN(dev_priv, 2))
14583                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14584
14585         if (crtc_state->has_pch_encoder) {
14586                 enum pipe pch_transcoder =
14587                         intel_crtc_pch_transcoder(crtc);
14588
14589                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14590         }
14591 }
14592
14593 static void intel_finish_crtc_commit(struct intel_atomic_state *state,
14594                                      struct intel_crtc *crtc)
14595 {
14596         struct intel_crtc_state *old_crtc_state =
14597                 intel_atomic_get_old_crtc_state(state, crtc);
14598         struct intel_crtc_state *new_crtc_state =
14599                 intel_atomic_get_new_crtc_state(state, crtc);
14600
14601         intel_pipe_update_end(new_crtc_state);
14602
14603         if (new_crtc_state->update_pipe &&
14604             !needs_modeset(new_crtc_state) &&
14605             old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14606                 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14607 }
14608
14609 /**
14610  * intel_plane_destroy - destroy a plane
14611  * @plane: plane to destroy
14612  *
14613  * Common destruction function for all types of planes (primary, cursor,
14614  * sprite).
14615  */
14616 void intel_plane_destroy(struct drm_plane *plane)
14617 {
14618         drm_plane_cleanup(plane);
14619         kfree(to_intel_plane(plane));
14620 }
14621
14622 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14623                                             u32 format, u64 modifier)
14624 {
14625         switch (modifier) {
14626         case DRM_FORMAT_MOD_LINEAR:
14627         case I915_FORMAT_MOD_X_TILED:
14628                 break;
14629         default:
14630                 return false;
14631         }
14632
14633         switch (format) {
14634         case DRM_FORMAT_C8:
14635         case DRM_FORMAT_RGB565:
14636         case DRM_FORMAT_XRGB1555:
14637         case DRM_FORMAT_XRGB8888:
14638                 return modifier == DRM_FORMAT_MOD_LINEAR ||
14639                         modifier == I915_FORMAT_MOD_X_TILED;
14640         default:
14641                 return false;
14642         }
14643 }
14644
14645 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14646                                             u32 format, u64 modifier)
14647 {
14648         switch (modifier) {
14649         case DRM_FORMAT_MOD_LINEAR:
14650         case I915_FORMAT_MOD_X_TILED:
14651                 break;
14652         default:
14653                 return false;
14654         }
14655
14656         switch (format) {
14657         case DRM_FORMAT_C8:
14658         case DRM_FORMAT_RGB565:
14659         case DRM_FORMAT_XRGB8888:
14660         case DRM_FORMAT_XBGR8888:
14661         case DRM_FORMAT_XRGB2101010:
14662         case DRM_FORMAT_XBGR2101010:
14663                 return modifier == DRM_FORMAT_MOD_LINEAR ||
14664                         modifier == I915_FORMAT_MOD_X_TILED;
14665         default:
14666                 return false;
14667         }
14668 }
14669
14670 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14671                                               u32 format, u64 modifier)
14672 {
14673         return modifier == DRM_FORMAT_MOD_LINEAR &&
14674                 format == DRM_FORMAT_ARGB8888;
14675 }
14676
/* drm_plane vtable for gen4+ (pre-gen9) primary planes. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
14685
/* drm_plane vtable for gen2/3 primary planes. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
14694
/*
 * Fastpath for the legacy cursor ioctls: update the cursor plane
 * directly, bypassing the full atomic commit machinery, when only the
 * fb or position changed. Anything that could affect watermarks, or any
 * pending modeset/commit on the plane, falls back to
 * drm_atomic_helper_update_plane() (the slowpath).
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->base.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	/* Scratch crtc state so the plane check has something to write to. */
	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  to_intel_plane_state(old_plane_state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
				to_intel_frontbuffer(fb),
				intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (plane->state->visible)
		intel_update_plane(intel_plane, crtc_state,
				   to_intel_plane_state(plane->state));
	else
		intel_disable_plane(intel_plane, crtc_state);

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/* The scratch crtc state is never swapped in; always destroy it. */
	if (new_crtc_state)
		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
	/* On success old_plane_state was swapped out and is freed instead. */
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
14823
/*
 * drm_plane vtable for cursor planes; .update_plane uses the legacy
 * cursor fastpath rather than the generic atomic helper.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
14832
14833 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14834                                enum i9xx_plane_id i9xx_plane)
14835 {
14836         if (!HAS_FBC(dev_priv))
14837                 return false;
14838
14839         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14840                 return i9xx_plane == PLANE_A; /* tied to pipe A */
14841         else if (IS_IVYBRIDGE(dev_priv))
14842                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14843                         i9xx_plane == PLANE_C;
14844         else if (INTEL_GEN(dev_priv) >= 4)
14845                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14846         else
14847                 return i9xx_plane == PLANE_A;
14848 }
14849
/*
 * Create the primary plane for @pipe on pre-gen9 platforms (gen9+
 * delegates to skl_universal_plane_create()). Returns the plane or an
 * ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u64 *modifiers;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	/* Let FBC know which frontbuffer bit it may track. */
	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pick the format list and plane vtable for this generation. */
	if (INTEL_GEN(dev_priv) >= 4) {
		formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
		modifiers = i9xx_format_modifiers;

		plane->max_stride = i9xx_plane_max_stride;
		plane->update_plane = i9xx_update_plane;
		plane->disable_plane = i9xx_disable_plane;
		plane->get_hw_state = i9xx_plane_get_hw_state;
		plane->check_plane = i9xx_plane_check;

		plane_funcs = &i965_plane_funcs;
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		plane->max_stride = i9xx_plane_max_stride;
		plane->update_plane = i9xx_update_plane;
		plane->disable_plane = i9xx_disable_plane;
		plane->get_hw_state = i9xx_plane_get_hw_state;
		plane->check_plane = i9xx_plane_check;

		plane_funcs = &i8xx_plane_funcs;
	}

	/*
	 * NOTE(review): BIT(pipe) assumes the crtc for @pipe ends up with
	 * drm_crtc_index() == pipe, which may not hold if earlier pipes
	 * are fused off — confirm against how crtcs are registered.
	 */
	possible_crtcs = BIT(pipe);

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats, modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats, modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary planes sit at the bottom of the z order. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
14961
/*
 * Create the cursor plane for @pipe. i845/i865 use a different cursor
 * implementation from later platforms. Returns the plane or an ERR_PTR
 * on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * ~0 sentinels; presumably so the first update never matches the
	 * cached register values — confirm against the cursor update hooks.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	/*
	 * NOTE(review): BIT(pipe) assumes the crtc for @pipe ends up with
	 * drm_crtc_index() == pipe, which may not hold if earlier pipes
	 * are fused off — confirm against how crtcs are registered.
	 */
	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor sits above the primary plane and all sprites. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
15029
15030 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
15031                                     struct intel_crtc_state *crtc_state)
15032 {
15033         struct intel_crtc_scaler_state *scaler_state =
15034                 &crtc_state->scaler_state;
15035         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15036         int i;
15037
15038         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
15039         if (!crtc->num_scalers)
15040                 return;
15041
15042         for (i = 0; i < crtc->num_scalers; i++) {
15043                 struct intel_scaler *scaler = &scaler_state->scalers[i];
15044
15045                 scaler->in_use = 0;
15046                 scaler->mode = 0;
15047         }
15048
15049         scaler_state->scaler_id = -1;
15050 }
15051
/*
 * Callbacks shared by every intel crtc vtable; the per-platform tables
 * add only the vblank counter/enable/disable hooks on top of this.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
15062
/* crtc vtable: g4x-style hw frame counter, bdw-style vblank irq. */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};
15070
/* crtc vtable: g4x-style hw frame counter, ilk-style vblank irq. */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};
15078
/* crtc vtable: g4x-style hw frame counter, i965-style vblank irq. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};
15086
/* crtc vtable: i915-style hw frame counter, i965-style vblank irq. */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};
15094
/* CRTC vfuncs used for i945GM, which has its own vblank enable/disable hooks. */
static const struct drm_crtc_funcs i945gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i945gm_enable_vblank,
	.disable_vblank = i945gm_disable_vblank,
};
15102
/* CRTC vfuncs used for the remaining gen3 GMCH platforms. */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
15110
/*
 * CRTC vfuncs used for gen2; no .get_vblank_counter is provided since
 * the hardware has no vblank counter.
 */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
15118
/*
 * Allocate and register the CRTC for @pipe: create its primary plane,
 * any sprite planes and the cursor plane, pick the platform-appropriate
 * drm_crtc_funcs table, and record the pipe->crtc (and, pre-gen9,
 * plane->crtc) mappings in @dev_priv.
 *
 * Returns 0 on success or a negative errno.  On failure only the
 * allocations made directly here are freed; any planes already created
 * are cleaned up later by drm_mode_config_cleanup() (see the fail:
 * comment below).
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	/* Hook up the initial atomic state before anyone can observe the crtc. */
	__drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
	intel_crtc->config = crtc_state;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	/* Select the vblank hooks matching this platform's display engine. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv))
			funcs = &i945gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe must be registered exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(intel_crtc);

	/* Other code relies on drm_crtc_index() == pipe. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
15225
15226 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15227                                       struct drm_file *file)
15228 {
15229         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15230         struct drm_crtc *drmmode_crtc;
15231         struct intel_crtc *crtc;
15232
15233         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15234         if (!drmmode_crtc)
15235                 return -ENOENT;
15236
15237         crtc = to_intel_crtc(drmmode_crtc);
15238         pipe_from_crtc_id->pipe = crtc->pipe;
15239
15240         return 0;
15241 }
15242
15243 static int intel_encoder_clones(struct intel_encoder *encoder)
15244 {
15245         struct drm_device *dev = encoder->base.dev;
15246         struct intel_encoder *source_encoder;
15247         int index_mask = 0;
15248         int entry = 0;
15249
15250         for_each_intel_encoder(dev, source_encoder) {
15251                 if (encoders_cloneable(encoder, source_encoder))
15252                         index_mask |= (1 << entry);
15253
15254                 entry++;
15255         }
15256
15257         return index_mask;
15258 }
15259
15260 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
15261 {
15262         struct drm_device *dev = encoder->base.dev;
15263         struct intel_crtc *crtc;
15264         u32 possible_crtcs = 0;
15265
15266         for_each_intel_crtc(dev, crtc) {
15267                 if (encoder->crtc_mask & BIT(crtc->pipe))
15268                         possible_crtcs |= drm_crtc_mask(&crtc->base);
15269         }
15270
15271         return possible_crtcs;
15272 }
15273
15274 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
15275 {
15276         if (!IS_MOBILE(dev_priv))
15277                 return false;
15278
15279         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
15280                 return false;
15281
15282         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
15283                 return false;
15284
15285         return true;
15286 }
15287
15288 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
15289 {
15290         if (INTEL_GEN(dev_priv) >= 9)
15291                 return false;
15292
15293         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
15294                 return false;
15295
15296         if (HAS_PCH_LPT_H(dev_priv) &&
15297             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
15298                 return false;
15299
15300         /* DDI E can't be used if DDI A requires 4 lanes */
15301         if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
15302                 return false;
15303
15304         if (!dev_priv->vbt.int_crt_support)
15305                 return false;
15306
15307         return true;
15308 }
15309
15310 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
15311 {
15312         int pps_num;
15313         int pps_idx;
15314
15315         if (HAS_DDI(dev_priv))
15316                 return;
15317         /*
15318          * This w/a is needed at least on CPT/PPT, but to be sure apply it
15319          * everywhere where registers can be write protected.
15320          */
15321         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15322                 pps_num = 2;
15323         else
15324                 pps_num = 1;
15325
15326         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15327                 u32 val = I915_READ(PP_CONTROL(pps_idx));
15328
15329                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15330                 I915_WRITE(PP_CONTROL(pps_idx), val);
15331         }
15332 }
15333
15334 static void intel_pps_init(struct drm_i915_private *dev_priv)
15335 {
15336         if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
15337                 dev_priv->pps_mmio_base = PCH_PPS_BASE;
15338         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15339                 dev_priv->pps_mmio_base = VLV_PPS_BASE;
15340         else
15341                 dev_priv->pps_mmio_base = PPS_BASE;
15342
15343         intel_pps_unlock_regs_wa(dev_priv);
15344 }
15345
/*
 * Probe and register every display output (encoder/connector) for this
 * platform, then fill in each encoder's possible_crtcs/possible_clones
 * masks.
 *
 * NOTE(review): the probe order within each platform branch is
 * significant — e.g. LVDS before eDP on PCH-split (see the comment
 * there) and eDP before HDMI on VLV/CHV — do not reorder.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	/* PPS state must be set up before any eDP/LVDS probing below. */
	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/*
	 * All encoders are now registered; compute their crtc and clone
	 * compatibility masks.
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
15584
15585 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15586 {
15587         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15588
15589         drm_framebuffer_cleanup(fb);
15590         intel_frontbuffer_put(intel_fb->frontbuffer);
15591
15592         kfree(intel_fb);
15593 }
15594
15595 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15596                                                 struct drm_file *file,
15597                                                 unsigned int *handle)
15598 {
15599         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15600
15601         if (obj->userptr.mm) {
15602                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15603                 return -EINVAL;
15604         }
15605
15606         return drm_gem_handle_create(file, &obj->base, handle);
15607 }
15608
15609 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15610                                         struct drm_file *file,
15611                                         unsigned flags, unsigned color,
15612                                         struct drm_clip_rect *clips,
15613                                         unsigned num_clips)
15614 {
15615         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15616
15617         i915_gem_object_flush_if_display(obj);
15618         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
15619
15620         return 0;
15621 }
15622
/* Framebuffer vfuncs registered by intel_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
15628
/*
 * Fill in @intel_fb for GEM object @obj according to @mode_cmd and
 * register it with the DRM core, validating the requested format,
 * modifier, tiling, strides and offsets against hardware limits.
 *
 * Returns 0 on success or a negative errno.  The frontbuffer reference
 * taken here is dropped again on every error path; the caller keeps its
 * own reference to @obj.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's fence tiling/stride under the object lock. */
	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* No explicit modifier: derive it from the fence tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* Some plane must be able to scan out this format+modifier combo. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All planes must share the one backing object. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);

		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
		    is_ccs_modifier(fb->modifier))
			stride_alignment *= 4;

		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
15764
15765 static struct drm_framebuffer *
15766 intel_user_framebuffer_create(struct drm_device *dev,
15767                               struct drm_file *filp,
15768                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
15769 {
15770         struct drm_framebuffer *fb;
15771         struct drm_i915_gem_object *obj;
15772         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15773
15774         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15775         if (!obj)
15776                 return ERR_PTR(-ENOENT);
15777
15778         fb = intel_framebuffer_create(obj, &mode_cmd);
15779         i915_gem_object_put(obj);
15780
15781         return fb;
15782 }
15783
15784 static void intel_atomic_state_free(struct drm_atomic_state *state)
15785 {
15786         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15787
15788         drm_atomic_state_default_release(state);
15789
15790         i915_sw_fence_fini(&intel_state->commit_ready);
15791
15792         kfree(state);
15793 }
15794
15795 static enum drm_mode_status
15796 intel_mode_valid(struct drm_device *dev,
15797                  const struct drm_display_mode *mode)
15798 {
15799         struct drm_i915_private *dev_priv = to_i915(dev);
15800         int hdisplay_max, htotal_max;
15801         int vdisplay_max, vtotal_max;
15802
15803         /*
15804          * Can't reject DBLSCAN here because Xorg ddxen can add piles
15805          * of DBLSCAN modes to the output's mode list when they detect
15806          * the scaling mode property on the connector. And they don't
15807          * ask the kernel to validate those modes in any way until
15808          * modeset time at which point the client gets a protocol error.
15809          * So in order to not upset those clients we silently ignore the
15810          * DBLSCAN flag on such connectors. For other connectors we will
15811          * reject modes with the DBLSCAN flag in encoder->compute_config().
15812          * And we always reject DBLSCAN modes in connector->mode_valid()
15813          * as we never want such modes on the connector's mode list.
15814          */
15815
15816         if (mode->vscan > 1)
15817                 return MODE_NO_VSCAN;
15818
15819         if (mode->flags & DRM_MODE_FLAG_HSKEW)
15820                 return MODE_H_ILLEGAL;
15821
15822         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15823                            DRM_MODE_FLAG_NCSYNC |
15824                            DRM_MODE_FLAG_PCSYNC))
15825                 return MODE_HSYNC;
15826
15827         if (mode->flags & (DRM_MODE_FLAG_BCAST |
15828                            DRM_MODE_FLAG_PIXMUX |
15829                            DRM_MODE_FLAG_CLKDIV2))
15830                 return MODE_BAD;
15831
15832         /* Transcoder timing limits */
15833         if (INTEL_GEN(dev_priv) >= 11) {
15834                 hdisplay_max = 16384;
15835                 vdisplay_max = 8192;
15836                 htotal_max = 16384;
15837                 vtotal_max = 8192;
15838         } else if (INTEL_GEN(dev_priv) >= 9 ||
15839                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15840                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15841                 vdisplay_max = 4096;
15842                 htotal_max = 8192;
15843                 vtotal_max = 8192;
15844         } else if (INTEL_GEN(dev_priv) >= 3) {
15845                 hdisplay_max = 4096;
15846                 vdisplay_max = 4096;
15847                 htotal_max = 8192;
15848                 vtotal_max = 8192;
15849         } else {
15850                 hdisplay_max = 2048;
15851                 vdisplay_max = 2048;
15852                 htotal_max = 4096;
15853                 vtotal_max = 4096;
15854         }
15855
15856         if (mode->hdisplay > hdisplay_max ||
15857             mode->hsync_start > htotal_max ||
15858             mode->hsync_end > htotal_max ||
15859             mode->htotal > htotal_max)
15860                 return MODE_H_ILLEGAL;
15861
15862         if (mode->vdisplay > vdisplay_max ||
15863             mode->vsync_start > vtotal_max ||
15864             mode->vsync_end > vtotal_max ||
15865             mode->vtotal > vtotal_max)
15866                 return MODE_V_ILLEGAL;
15867
15868         return MODE_OK;
15869 }
15870
15871 enum drm_mode_status
15872 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
15873                                 const struct drm_display_mode *mode)
15874 {
15875         int plane_width_max, plane_height_max;
15876
15877         /*
15878          * intel_mode_valid() should be
15879          * sufficient on older platforms.
15880          */
15881         if (INTEL_GEN(dev_priv) < 9)
15882                 return MODE_OK;
15883
15884         /*
15885          * Most people will probably want a fullscreen
15886          * plane so let's not advertize modes that are
15887          * too big for that.
15888          */
15889         if (INTEL_GEN(dev_priv) >= 11) {
15890                 plane_width_max = 5120;
15891                 plane_height_max = 4320;
15892         } else {
15893                 plane_width_max = 5120;
15894                 plane_height_max = 4096;
15895         }
15896
15897         if (mode->hdisplay > plane_width_max)
15898                 return MODE_H_ILLEGAL;
15899
15900         if (mode->vdisplay > plane_height_max)
15901                 return MODE_V_ILLEGAL;
15902
15903         return MODE_OK;
15904 }
15905
/*
 * Core KMS mode_config vtable: framebuffer creation, mode validation
 * and the atomic check/commit/state-management entry points for i915.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
15917
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Fills in dev_priv->display with the per-platform pipe config readout,
 * initial plane readout, clock computation, crtc enable/disable, FDI link
 * training and modeset-commit hooks. The branch order matters: more
 * specific/newer platforms are matched before the generic fallbacks.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		/* Gen9+: HSW-style pipe/clock code, SKL plane readout. */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* HSW/BDW: HSW pipe/clock code, pre-SKL plane readout. */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK/SNB/IVB: PCH-split platforms without DDI. */
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		/* Remaining gen3/gen4 GMCH platforms. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* Gen2 only differs in the DPLL computation. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training hook, only set on platforms that have FDI. */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;

}
16011
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	/* Read the current cdclk state out of the hardware ... */
	intel_update_cdclk(i915);
	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
	/* ... and start the logical/actual software state in sync with it. */
	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
16018
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	/* Standard modeset-lock deadlock avoidance: back off and retry. */
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc_state);

		/* Copy the computed watermarks into the committed state. */
		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
16103
/*
 * Determine the FDI PLL frequency for platforms that have FDI.
 * Gen5 reads it from the BIOS-programmed FDI_PLL_BIOS_0 register;
 * gen6 and IVB use a fixed 270 MHz. Other platforms are left untouched
 * (no FDI), and nothing is logged for them.
 */
static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 5)) {
		u32 fdi_pll_clk =
			I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		/* Register value is converted to kHz here. */
		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
		dev_priv->fdi_pll_freq = 270000;
	} else {
		return;
	}

	DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}
16119
/*
 * Commit the state read out at driver load back to the hardware, so all
 * active crtcs/planes recompute their derived software state.
 * Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/* Pull every crtc (and the planes on active ones) into the state. */
	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On lock contention, clear the state and retry the whole commit. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
16175
/*
 * Initialize the DRM mode_config structure with i915's per-platform
 * framebuffer and cursor size limits, and hook up intel_mode_funcs.
 */
static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->allow_fb_modifiers = true;

	mode_config->funcs = &intel_mode_funcs;

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (INTEL_GEN(i915) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (INTEL_GEN(i915) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (IS_GEN(i915, 3)) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	/* Cursor size limits, again most specific platforms first. */
	if (IS_I845G(i915) || IS_I865G(i915)) {
		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (IS_GEN(i915, 2)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}
16221
/*
 * Main display initialization at driver load: sets up workqueues, the
 * mode_config, crtcs, outputs, reads out the BIOS-programmed hardware
 * state and commits it back. Returns 0 on success or a negative error
 * code. The ordering of the calls below is significant.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	/* NOTE(review): allocation results are not checked here — verify
	 * whether a NULL workqueue is tolerated by later users. */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_bw_init(i915);
	if (ret)
		return ret;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_NUM_PIPES(i915),
		      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Register a crtc for each pipe, if the display is usable at all. */
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				drm_mode_config_cleanup(dev);
				return ret;
			}
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read the current hardware state into the atomic state objects. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	/* A failed initial commit is logged but not treated as fatal. */
	return 0;
}
16326
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60Hz timing
 * (quirk handling). Programs the DPLL dividers, pipe timings and
 * PIPECONF directly, then waits for the scanline to start moving.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the fixed dividers produce the expected dotclock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Standard VGA 640x480 timings (all registers are 0-based). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
16396
/*
 * Counterpart to i830_enable_pipe(): force-disable the pipe and its DPLL
 * for the i830 quirk, after verifying no planes/cursors are still enabled.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* All planes and cursors should already be off at this point. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Wait for the pipe to actually stop scanning out ... */
	intel_wait_for_pipe_scanline_stopped(crtc);

	/* ... before shutting the DPLL down. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
16418
/*
 * Disable any primary plane that the hardware reports as attached to a
 * different pipe than its crtc. Only relevant on pre-gen4 hardware;
 * gen4+ is skipped entirely.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		/* Skip planes that are not enabled in hardware. */
		if (!plane->get_hw_state(plane, &pipe))
			continue;

		/* Plane already scans out from its own pipe: nothing to do. */
		if (pipe == crtc->pipe)
			continue;

		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			      plane->base.base.id, plane->base.name);

		/* Disable the plane on the pipe it's actually attached to. */
		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
16446
16447 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
16448 {
16449         struct drm_device *dev = crtc->base.dev;
16450         struct intel_encoder *encoder;
16451
16452         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
16453                 return true;
16454
16455         return false;
16456 }
16457
16458 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
16459 {
16460         struct drm_device *dev = encoder->base.dev;
16461         struct intel_connector *connector;
16462
16463         for_each_connector_on_encoder(dev, &encoder->base, connector)
16464                 return connector;
16465
16466         return NULL;
16467 }
16468
16469 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
16470                               enum pipe pch_transcoder)
16471 {
16472         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
16473                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
16474 }
16475
/*
 * Bring a crtc's hardware state, as left behind by the BIOS, in line with
 * what the driver expects: clear debug frame start delays, turn off all
 * non-primary planes, reset the background color, disable encoder-less
 * active pipes, and set up FIFO underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (crtc_state->base.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
				   SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
16548
16549 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
16550 {
16551         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
16552
16553         /*
16554          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
16555          * the hardware when a high res displays plugged in. DPLL P
16556          * divider is zero, and the pipe timings are bonkers. We'll
16557          * try to disable everything in that case.
16558          *
16559          * FIXME would be nice to be able to sanitize this state
16560          * without several WARNs, but for now let's take the easy
16561          * road.
16562          */
16563         return IS_GEN(dev_priv, 6) &&
16564                 crtc_state->base.active &&
16565                 crtc_state->shared_dpll &&
16566                 crtc_state->port_clock == 0;
16567 }
16568
/*
 * Sanitize a single encoder's hw state after readout: if the encoder
 * has active connectors but no active pipe (e.g. fallout from resume
 * register restore, or a bogus SNB DPLL config), run its disable hooks
 * manually and clamp the connector/encoder linkage to off. Finally
 * notify opregion and, on gen11+, sanitize the PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->base.active;

	/* Treat a misprogrammed DPLL like an inactive pipe so the
	 * encoder gets disabled below. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* restore the connector state we temporarily patched */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
16635
16636 /* FIXME read out full plane state for all planes */
16637 static void readout_plane_state(struct drm_i915_private *dev_priv)
16638 {
16639         struct intel_plane *plane;
16640         struct intel_crtc *crtc;
16641
16642         for_each_intel_plane(&dev_priv->drm, plane) {
16643                 struct intel_plane_state *plane_state =
16644                         to_intel_plane_state(plane->base.state);
16645                 struct intel_crtc_state *crtc_state;
16646                 enum pipe pipe = PIPE_A;
16647                 bool visible;
16648
16649                 visible = plane->get_hw_state(plane, &pipe);
16650
16651                 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16652                 crtc_state = to_intel_crtc_state(crtc->base.state);
16653
16654                 intel_set_plane_visible(crtc_state, plane_state, visible);
16655
16656                 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
16657                               plane->base.base.id, plane->base.name,
16658                               enableddisabled(visible), pipe_name(pipe));
16659         }
16660
16661         for_each_intel_crtc(&dev_priv->drm, crtc) {
16662                 struct intel_crtc_state *crtc_state =
16663                         to_intel_crtc_state(crtc->base.state);
16664
16665                 fixup_active_planes(crtc_state);
16666         }
16667 }
16668
/*
 * Read the current display hw state (pipes, planes, shared DPLLs,
 * encoders, connectors) into the atomic software state, rebuilding
 * each crtc state from scratch. For active pipes it also derives the
 * initial mode, pixel rate, min cdclk/voltage level and bandwidth
 * bookkeeping. Sanitization of the read-out state happens separately
 * in intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Start from a pristine crtc state before reading out. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_pipes |= BIT(crtc->pipe);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	readout_plane_state(dev_priv);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);

		/* EHL DPLL4 holds a power domain reference while enabled. */
		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
		    pll->info->id == DPLL_ID_EHL_DPLL4) {
			pll->wakeref = intel_display_power_get(dev_priv,
							       POWER_DOMAIN_DPLL_DC_OFF);
		}

		/* Rebuild the crtc mask from the active pipes using this PLL. */
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive the remaining software state for each crtc. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc_state);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->base.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
		}

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
16846
16847 static void
16848 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16849 {
16850         struct intel_encoder *encoder;
16851
16852         for_each_intel_encoder(&dev_priv->drm, encoder) {
16853                 struct intel_crtc_state *crtc_state;
16854
16855                 if (!encoder->get_power_domains)
16856                         continue;
16857
16858                 /*
16859                  * MST-primary and inactive encoders don't have a crtc state
16860                  * and neither of these require any power domain references.
16861                  */
16862                 if (!encoder->base.crtc)
16863                         continue;
16864
16865                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16866                 encoder->get_power_domains(encoder, crtc_state);
16867         }
16868 }
16869
16870 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16871 {
16872         /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16873         if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16874                 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16875                            DARBF_GATING_DIS);
16876
16877         if (IS_HASWELL(dev_priv)) {
16878                 /*
16879                  * WaRsPkgCStateDisplayPMReq:hsw
16880                  * System hang if this isn't done before disabling all planes!
16881                  */
16882                 I915_WRITE(CHICKEN_PAR1_1,
16883                            I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16884         }
16885 }
16886
16887 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16888                                        enum port port, i915_reg_t hdmi_reg)
16889 {
16890         u32 val = I915_READ(hdmi_reg);
16891
16892         if (val & SDVO_ENABLE ||
16893             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16894                 return;
16895
16896         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16897                       port_name(port));
16898
16899         val &= ~SDVO_PIPE_SEL_MASK;
16900         val |= SDVO_PIPE_SEL(PIPE_A);
16901
16902         I915_WRITE(hdmi_reg, val);
16903 }
16904
16905 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16906                                      enum port port, i915_reg_t dp_reg)
16907 {
16908         u32 val = I915_READ(dp_reg);
16909
16910         if (val & DP_PORT_EN ||
16911             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16912                 return;
16913
16914         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16915                       port_name(port));
16916
16917         val &= ~DP_PIPE_SEL_MASK;
16918         val |= DP_PIPE_SEL(PIPE_A);
16919
16920         I915_WRITE(dp_reg, val);
16921 }
16922
/* Sanitize the transcoder select bits of all disabled IBX PCH ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
16945
/*
 * Scan out the current hw modeset state and sanitize it to the current
 * software state: read the hw state, then fix up TypeC ports, PCH port
 * transcoder selects, plane mappings, encoders and crtcs, and disable
 * any DPLLs left enabled but unused. The ordering of the steps below is
 * significant (see the inline comments).
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->base.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any DPLL the BIOS left enabled but no crtc uses. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and sanitize, where supported) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/* No crtc should need additional power domains at this point. */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}
17046
17047 void intel_display_resume(struct drm_device *dev)
17048 {
17049         struct drm_i915_private *dev_priv = to_i915(dev);
17050         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
17051         struct drm_modeset_acquire_ctx ctx;
17052         int ret;
17053
17054         dev_priv->modeset_restore_state = NULL;
17055         if (state)
17056                 state->acquire_ctx = &ctx;
17057
17058         drm_modeset_acquire_init(&ctx, 0);
17059
17060         while (1) {
17061                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
17062                 if (ret != -EDEADLK)
17063                         break;
17064
17065                 drm_modeset_backoff(&ctx);
17066         }
17067
17068         if (!ret)
17069                 ret = __intel_display_resume(dev, state, &ctx);
17070
17071         intel_enable_ipc(dev_priv);
17072         drm_modeset_drop_locks(&ctx);
17073         drm_modeset_acquire_fini(&ctx);
17074
17075         if (ret)
17076                 DRM_ERROR("Restoring old state failed with %i\n", ret);
17077         if (state)
17078                 drm_atomic_state_put(state);
17079 }
17080
17081 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
17082 {
17083         struct intel_connector *connector;
17084         struct drm_connector_list_iter conn_iter;
17085
17086         /* Kill all the work that may have been queued by hpd. */
17087         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
17088         for_each_intel_connector_iter(connector, &conn_iter) {
17089                 if (connector->modeset_retry_work.func)
17090                         cancel_work_sync(&connector->modeset_retry_work);
17091                 if (connector->hdcp.shim) {
17092                         cancel_delayed_work_sync(&connector->hdcp.check_work);
17093                         cancel_work_sync(&connector->hdcp.prop_work);
17094                 }
17095         }
17096         drm_connector_list_iter_end(&conn_iter);
17097 }
17098
/*
 * Tear down the display side of the driver. The ordering below is
 * deliberate: flush pending work first, then kill interrupts, polling
 * and fbdev before dismantling the mode config (see inline comments).
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Let any in-flight flips and modesets finish first. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
17143
17144 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
17145
/*
 * Snapshot of display registers captured at error time, for inclusion
 * in the GPU error state dump. Filled in by
 * intel_display_capture_error_state() and printed by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_CTL2, hsw/bdw only */
	u32 power_well_driver;

	/* Cursor plane registers, indexed by pipe. */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): not filled in by the capture code here */
	} cursor[I915_MAX_PIPES];

	/* Per-pipe state; registers only valid if power_domain_on. */
	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;	/* PIPESTAT, GMCH platforms only */
	} pipe[I915_MAX_PIPES];

	/* Primary plane registers, indexed by pipe. Which fields are
	 * captured depends on the hw generation. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;	/* gen <= 3 */
		u32 pos;	/* gen <= 3 */
		u32 addr;	/* gen <= 7, not hsw */
		u32 surface;	/* gen >= 4 */
		u32 tile_offset;	/* gen >= 4 */
	} plane[I915_MAX_PIPES];

	/* Transcoder timing registers; slot layout matches the
	 * transcoders[] table in intel_display_capture_error_state()
	 * (A, B, C, D, EDP). Entries only valid if available. */
	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
17188
17189 struct intel_display_error_state *
17190 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
17191 {
17192         struct intel_display_error_state *error;
17193         int transcoders[] = {
17194                 TRANSCODER_A,
17195                 TRANSCODER_B,
17196                 TRANSCODER_C,
17197                 TRANSCODER_D,
17198                 TRANSCODER_EDP,
17199         };
17200         int i;
17201
17202         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
17203
17204         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
17205                 return NULL;
17206
17207         error = kzalloc(sizeof(*error), GFP_ATOMIC);
17208         if (error == NULL)
17209                 return NULL;
17210
17211         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17212                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
17213
17214         for_each_pipe(dev_priv, i) {
17215                 error->pipe[i].power_domain_on =
17216                         __intel_display_power_is_enabled(dev_priv,
17217                                                          POWER_DOMAIN_PIPE(i));
17218                 if (!error->pipe[i].power_domain_on)
17219                         continue;
17220
17221                 error->cursor[i].control = I915_READ(CURCNTR(i));
17222                 error->cursor[i].position = I915_READ(CURPOS(i));
17223                 error->cursor[i].base = I915_READ(CURBASE(i));
17224
17225                 error->plane[i].control = I915_READ(DSPCNTR(i));
17226                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
17227                 if (INTEL_GEN(dev_priv) <= 3) {
17228                         error->plane[i].size = I915_READ(DSPSIZE(i));
17229                         error->plane[i].pos = I915_READ(DSPPOS(i));
17230                 }
17231                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
17232                         error->plane[i].addr = I915_READ(DSPADDR(i));
17233                 if (INTEL_GEN(dev_priv) >= 4) {
17234                         error->plane[i].surface = I915_READ(DSPSURF(i));
17235                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
17236                 }
17237
17238                 error->pipe[i].source = I915_READ(PIPESRC(i));
17239
17240                 if (HAS_GMCH(dev_priv))
17241                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
17242         }
17243
17244         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17245                 enum transcoder cpu_transcoder = transcoders[i];
17246
17247                 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
17248                         continue;
17249
17250                 error->transcoder[i].available = true;
17251                 error->transcoder[i].power_domain_on =
17252                         __intel_display_power_is_enabled(dev_priv,
17253                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
17254                 if (!error->transcoder[i].power_domain_on)
17255                         continue;
17256
17257                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
17258
17259                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
17260                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
17261                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
17262                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
17263                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
17264                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
17265                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
17266         }
17267
17268         return error;
17269 }
17270
/* Shorthand for printing a line into the error-state dump buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
17272
17273 void
17274 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
17275                                 struct intel_display_error_state *error)
17276 {
17277         struct drm_i915_private *dev_priv = m->i915;
17278         int i;
17279
17280         if (!error)
17281                 return;
17282
17283         err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
17284         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17285                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
17286                            error->power_well_driver);
17287         for_each_pipe(dev_priv, i) {
17288                 err_printf(m, "Pipe [%d]:\n", i);
17289                 err_printf(m, "  Power: %s\n",
17290                            onoff(error->pipe[i].power_domain_on));
17291                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
17292                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
17293
17294                 err_printf(m, "Plane [%d]:\n", i);
17295                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
17296                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
17297                 if (INTEL_GEN(dev_priv) <= 3) {
17298                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
17299                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
17300                 }
17301                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
17302                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
17303                 if (INTEL_GEN(dev_priv) >= 4) {
17304                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
17305                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
17306                 }
17307
17308                 err_printf(m, "Cursor [%d]:\n", i);
17309                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
17310                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
17311                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
17312         }
17313
17314         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17315                 if (!error->transcoder[i].available)
17316                         continue;
17317
17318                 err_printf(m, "CPU transcoder: %s\n",
17319                            transcoder_name(error->transcoder[i].cpu_transcoder));
17320                 err_printf(m, "  Power: %s\n",
17321                            onoff(error->transcoder[i].power_domain_on));
17322                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
17323                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
17324                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
17325                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
17326                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
17327                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
17328                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
17329         }
17330 }
17331
17332 #endif