/*
 * drivers/gpu/drm/i915/intel_display.c
 * Snapshot carrying: "drm/i915: Fix a potential integer overflow with
 * framebuffers extending past 4 GiB".
 */
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include "intel_frontbuffer.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 #include "i915_gem_clflush.h"
41 #include "intel_dsi.h"
42 #include "i915_trace.h"
43 #include <drm/drm_atomic.h>
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_dp_helper.h>
46 #include <drm/drm_crtc_helper.h>
47 #include <drm/drm_plane_helper.h>
48 #include <drm/drm_rect.h>
49 #include <linux/dma_remapping.h>
50 #include <linux/reservation.h>
51
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Tiling modifiers accepted on pre-SKL primary planes */
static const uint64_t i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID	/* list terminator */
};

/* SKL+ primary plane formats (adds per-pixel alpha and packed YUV) */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* As skl_primary_formats, plus NV12 for planes with planar YUV support */
static const uint32_t skl_pri_planar_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_NV12,
};

/* SKL+ modifiers when render/color compression (CCS) is not available */
static const uint64_t skl_format_modifiers_noccs[] = {
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID	/* list terminator */
};

/* SKL+ modifiers including CCS (compressed) layouts */
static const uint64_t skl_format_modifiers_ccs[] = {
	I915_FORMAT_MOD_Yf_TILED_CCS,
	I915_FORMAT_MOD_Y_TILED_CCS,
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID	/* list terminator */
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only take linear buffers */
static const uint64_t cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID	/* list terminator */
};
134
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
166
/*
 * Per-platform/per-output PLL divider limits, consumed by the
 * *_find_best_dpll() search routines and intel_PLL_is_valid().
 * All clock values are in kHz; dividers are raw register-style values.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;	/* allowed ranges per divider/clock */

	struct {
		int dot_limit;	/* dot clock threshold selecting slow vs fast p2 */
		int p2_slow, p2_fast;
	} p2;
};
177
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * hpll_freq indexes the 4-entry table above; assumes
	 * CCK_FUSE_HPLL_FREQ_MASK limits it to 0-3 -- TODO confirm.
	 * Table entries are MHz, hence the * 1000 to get kHz.
	 */
	return vco_freq[hpll_freq] * 1000;
}
191
/*
 * Read a CCK clock-control register and derive the resulting clock rate
 * in kHz from @ref_freq and the divider field. Warns (via @name) if the
 * hardware reports a divider change still in progress.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	/* Sideband access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	/* The status field should mirror the requested divider. */
	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	/* rate = ref_freq / ((divider + 1) / 2), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
210
/*
 * As vlv_get_cck_clock(), but referenced to the HPLL VCO; lazily caches
 * the HPLL frequency in dev_priv->hpll_freq on first use.
 */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}
220
/*
 * Cache the CZ clock rate in dev_priv->czclk_freq.
 * Only meaningful on VLV/CHV; a no-op elsewhere.
 */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
231
/*
 * FDI link frequency in units of 100MHz: on DDI platforms this comes
 * from the configured port clock (SPLL), otherwise from the cached
 * FDI PLL frequency.
 */
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}
241
/* gen2, DAC (VGA) output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2, DVO output (only p2_fast differs from the DAC limits) */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2, LVDS output */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* gen3/gen4 class (i9xx), SDVO output */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* gen3/gen4 class (i9xx), LVDS output */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
306
307
/* G4x, SDVO output */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x, HDMI/DP output */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x, single-channel LVDS (dot_limit 0: p2 is fixed) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x, dual-channel LVDS (dot_limit 0: p2 is fixed) */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview, SDVO output */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview, LVDS output */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
391
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB, single-channel LVDS, 120MHz refclk */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB, dual-channel LVDS, 120MHz refclk */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB, dual-channel LVDS, 100MHz refclk */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
462
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits on CHV, hence the << 22 */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
506
/*
 * Toggle HSW_FBCQ_DIS in the per-pipe CHICKEN_PIPESL_1 register.
 * NOTE(review): despite the skl_ prefix, this is a no-op on actual
 * SKL/BXT -- presumably intended only for later gen9/gen10 parts;
 * confirm against the workaround database.
 */
static void
skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (enable)
		I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
	else
		/* NOTE(review): plain write (no read-modify-write) clears
		 * every bit in the register -- verify no other chicken
		 * bits need preserving here. */
		I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
}
518
/*
 * Enable/disable DUPS1/DUPS2 clock-gating disable bits in
 * CLKGATE_DIS_PSL for @pipe. No-op on actual SKL/BXT.
 * NOTE(review): the enable path overwrites the whole register while
 * the disable path does read-modify-write -- confirm the asymmetry
 * is intentional.
 */
static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
533
/* Does this atomic crtc state require a full modeset? (thin DRM wrapper) */
static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
539
540 /*
541  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
542  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
543  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
544  * The helpers' return value is the rate of the clock that is fed to the
545  * display engine's pipe which can be the above fast dot clock rate or a
546  * divided-down version of it.
547  */
548 /* m1 is reserved as 0 in Pineview, n is a ring counter */
549 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
550 {
551         clock->m = clock->m2 + 2;
552         clock->p = clock->p1 * clock->p2;
553         if (WARN_ON(clock->n == 0 || clock->p == 0))
554                 return 0;
555         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
556         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
557
558         return clock->dot;
559 }
560
561 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
562 {
563         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
564 }
565
566 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
567 {
568         clock->m = i9xx_dpll_compute_m(clock);
569         clock->p = clock->p1 * clock->p2;
570         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
571                 return 0;
572         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
573         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
574
575         return clock->dot;
576 }
577
578 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
579 {
580         clock->m = clock->m1 * clock->m2;
581         clock->p = clock->p1 * clock->p2;
582         if (WARN_ON(clock->n == 0 || clock->p == 0))
583                 return 0;
584         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
585         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
586
587         return clock->dot / 5;
588 }
589
/*
 * Derive m, p, vco and dot for CHV PLLs. m2 carries 22 fractional
 * bits, hence the 64-bit multiply and the n << 22 denominator.
 * Returns the pipe clock (dot / 5), or 0 on degenerate dividers.
 * Non-static: also used by the DPIO/PLL code elsewhere.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	/* 64-bit math: refclk * m can exceed 32 bits with 22 fractional bits */
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
602
/*
 * NOTE: this macro executes "return false" from the *enclosing*
 * function; the string argument is only a (disabled) debug message.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is only required on platforms with two real M dividers */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT limit tables don't populate .m/.p, so skip them there */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
645
646 static int
647 i9xx_select_p2_div(const struct intel_limit *limit,
648                    const struct intel_crtc_state *crtc_state,
649                    int target)
650 {
651         struct drm_device *dev = crtc_state->base.crtc->dev;
652
653         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
654                 /*
655                  * For LVDS just rely on its current settings for dual-channel.
656                  * We haven't figured out how to reliably set up different
657                  * single/dual channel state, if we even can.
658                  */
659                 if (intel_is_dual_link_lvds(dev))
660                         return limit->p2.p2_fast;
661                 else
662                         return limit->p2.p2_slow;
663         } else {
664                 if (target < limit->p2.dot_limit)
665                         return limit->p2.p2_slow;
666                 else
667                         return limit->p2.p2_fast;
668         }
669 }
670
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* err starts at target: any valid candidate must beat it */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive search over all divider combinations in range. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* On these platforms m2 must stay below m1. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* Keep the candidate closest to target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one valid candidate was found */
	return (err != target);
}
728
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* err starts at target: any valid candidate must beat it */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive search as i9xx_find_best_dpll(), but without the
	 * m2 < m1 constraint (Pineview's m1 is reserved as 0) and using the
	 * Pineview clock equation.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* Keep the candidate closest to target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one valid candidate was found */
	return (err != target);
}
784
785 /*
786  * Returns a set of divisors for the desired target clock with the given
787  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
789  *
790  * Target and reference clocks are specified in kHz.
791  *
792  * If match_clock is provided, then best_clock P divider must match the P
793  * divider from @match_clock used for LVDS downclocking.
794  */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);

        /*
         * NOTE(review): @match_clock is accepted but never used by this
         * variant, unlike the i9xx/pnv versions — confirm whether LVDS
         * downclocking matters on g4x.
         */
        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                /* never revisit larger n than the best hit so far */
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}
843
/*
 * Check whether the calculated PLL configuration is more optimal compared
 * to the best configuration and error found so far.  Returns true if it
 * is, and reports the calculated error in ppm via @error_ppm.
 */
848 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
849                                const struct dpll *calculated_clock,
850                                const struct dpll *best_clock,
851                                unsigned int best_error_ppm,
852                                unsigned int *error_ppm)
853 {
854         /*
855          * For CHV ignore the error and consider only the P value.
856          * Prefer a bigger P value based on HW requirements.
857          */
858         if (IS_CHERRYVIEW(to_i915(dev))) {
859                 *error_ppm = 0;
860
861                 return calculated_clock->p > best_clock->p;
862         }
863
864         if (WARN_ON_ONCE(!target_freq))
865                 return false;
866
867         *error_ppm = div_u64(1000000ULL *
868                                 abs(target_freq - calculated_clock->dot),
869                              target_freq);
870         /*
871          * Prefer a better P value over a better (smaller) error if the error
872          * is small. Ensure this preference for future configurations too by
873          * setting the error to 0.
874          */
875         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
876                 *error_ppm = 0;
877
878                 return true;
879         }
880
881         return *error_ppm + 10 < best_error_ppm;
882 }
883
884 /*
885  * Returns a set of divisors for the desired target clock with the given
886  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
888  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
        bool found = false;

        /* NOTE(review): @match_clock is not used by this variant. */

        target *= 5; /* fast clock */

        memset(best_clock, 0, sizeof(*best_clock));

        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                        for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
                             clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                                clock.p = clock.p1 * clock.p2;
                                /* based on hardware requirement, prefer bigger m1,m2 values */
                                for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
                                        unsigned int ppm;

                                        /* solve m2 so the resulting dot clock lands on target */
                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
                                                                     refclk * clock.m1);

                                        vlv_calc_dpll_params(refclk, &clock);

                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        /* keep only candidates that beat the best so far */
                                        if (!vlv_PLL_is_optimal(dev, target,
                                                                &clock,
                                                                best_clock,
                                                                bestppm, &ppm))
                                                continue;

                                        *best_clock = clock;
                                        bestppm = ppm;
                                        found = true;
                                }
                        }
                }
        }

        return found;
}
943
944 /*
945  * Returns a set of divisors for the desired target clock with the given
946  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
948  */
949 static bool
950 chv_find_best_dpll(const struct intel_limit *limit,
951                    struct intel_crtc_state *crtc_state,
952                    int target, int refclk, struct dpll *match_clock,
953                    struct dpll *best_clock)
954 {
955         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
956         struct drm_device *dev = crtc->base.dev;
957         unsigned int best_error_ppm;
958         struct dpll clock;
959         uint64_t m2;
960         int found = false;
961
962         memset(best_clock, 0, sizeof(*best_clock));
963         best_error_ppm = 1000000;
964
965         /*
966          * Based on hardware doc, the n always set to 1, and m1 always
967          * set to 2.  If requires to support 200Mhz refclk, we need to
968          * revisit this because n may not 1 anymore.
969          */
970         clock.n = 1, clock.m1 = 2;
971         target *= 5;    /* fast clock */
972
973         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
974                 for (clock.p2 = limit->p2.p2_fast;
975                                 clock.p2 >= limit->p2.p2_slow;
976                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
977                         unsigned int error_ppm;
978
979                         clock.p = clock.p1 * clock.p2;
980
981                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
982                                         clock.n) << 22, refclk * clock.m1);
983
984                         if (m2 > INT_MAX/clock.m1)
985                                 continue;
986
987                         clock.m2 = m2;
988
989                         chv_calc_dpll_params(refclk, &clock);
990
991                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
992                                 continue;
993
994                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
995                                                 best_error_ppm, &error_ppm))
996                                 continue;
997
998                         *best_clock = clock;
999                         best_error_ppm = error_ppm;
1000                         found = true;
1001                 }
1002         }
1003
1004         return found;
1005 }
1006
1007 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1008                         struct dpll *best_clock)
1009 {
1010         int refclk = 100000;
1011         const struct intel_limit *limit = &intel_limits_bxt;
1012
1013         return chv_find_best_dpll(limit, crtc_state,
1014                                   target_clock, refclk, NULL, best_clock);
1015 }
1016
1017 bool intel_crtc_active(struct intel_crtc *crtc)
1018 {
1019         /* Be paranoid as we can arrive here with only partial
1020          * state retrieved from the hardware during setup.
1021          *
1022          * We can ditch the adjusted_mode.crtc_clock check as soon
1023          * as Haswell has gained clock readout/fastboot support.
1024          *
1025          * We can ditch the crtc->primary->state->fb check as soon as we can
1026          * properly reconstruct framebuffers.
1027          *
1028          * FIXME: The intel_crtc->active here should be switched to
1029          * crtc->state->active once we have proper CRTC states wired up
1030          * for atomic.
1031          */
1032         return crtc->active && crtc->base.primary->state->fb &&
1033                 crtc->config->base.adjusted_mode.crtc_clock;
1034 }
1035
1036 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1037                                              enum pipe pipe)
1038 {
1039         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1040
1041         return crtc->config->cpu_transcoder;
1042 }
1043
1044 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1045                                     enum pipe pipe)
1046 {
1047         i915_reg_t reg = PIPEDSL(pipe);
1048         u32 line1, line2;
1049         u32 line_mask;
1050
1051         if (IS_GEN2(dev_priv))
1052                 line_mask = DSL_LINEMASK_GEN2;
1053         else
1054                 line_mask = DSL_LINEMASK_GEN3;
1055
1056         line1 = I915_READ(reg) & line_mask;
1057         msleep(5);
1058         line2 = I915_READ(reg) & line_mask;
1059
1060         return line1 != line2;
1061 }
1062
/*
 * Poll until the pipe's scanline counter is observed moving (@state ==
 * true) or stopped (@state == false); logs an error if the wait_for()
 * timeout (100, presumably ms — confirm against the macro) expires first.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Wait for the display line to settle/start moving */
        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
                DRM_ERROR("pipe %c scanline %s wait timed out\n",
                          pipe_name(pipe), onoff(state));
}
1073
/* Wait for the pipe's scanline counter to stop advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, false);
}
1078
/* Wait for the pipe's scanline counter to start advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, true);
}
1083
/*
 * Wait for a pipe that is being shut down to actually turn off: gen4+
 * exposes a PIPECONF state bit that can be polled, older parts are
 * checked indirectly by waiting for the scanline counter to stop.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (INTEL_GEN(dev_priv) >= 4) {
                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_wait_for_register(dev_priv,
                                            reg, I965_PIPECONF_ACTIVE, 0,
                                            100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                intel_wait_for_pipe_scanline_stopped(crtc);
        }
}
1103
1104 /* Only for pre-ILK configs */
1105 void assert_pll(struct drm_i915_private *dev_priv,
1106                 enum pipe pipe, bool state)
1107 {
1108         u32 val;
1109         bool cur_state;
1110
1111         val = I915_READ(DPLL(pipe));
1112         cur_state = !!(val & DPLL_VCO_ENABLE);
1113         I915_STATE_WARN(cur_state != state,
1114              "PLL state assertion failure (expected %s, current %s)\n",
1115                         onoff(state), onoff(cur_state));
1116 }
1117
1118 /* XXX: the dsi pll is shared between MIPI DSI ports */
/* Assert that the DSI PLL VCO is in the expected on/off @state. */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        /* CCK reads go over the sideband, which requires sb_lock */
        mutex_lock(&dev_priv->sb_lock);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        mutex_unlock(&dev_priv->sb_lock);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1133
/*
 * Assert that the FDI transmitter feeding @pipe is in the expected on/off
 * @state.  DDI platforms have no dedicated FDI_TX register, so the
 * transcoder's DDI function enable bit is checked instead.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1153 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1154 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1155
1156 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1157                           enum pipe pipe, bool state)
1158 {
1159         u32 val;
1160         bool cur_state;
1161
1162         val = I915_READ(FDI_RX_CTL(pipe));
1163         cur_state = !!(val & FDI_RX_ENABLE);
1164         I915_STATE_WARN(cur_state != state,
1165              "FDI RX state assertion failure (expected %s, current %s)\n",
1166                         onoff(state), onoff(cur_state));
1167 }
1168 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1169 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1170
1171 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1172                                       enum pipe pipe)
1173 {
1174         u32 val;
1175
1176         /* ILK FDI PLL is always enabled */
1177         if (IS_GEN5(dev_priv))
1178                 return;
1179
1180         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1181         if (HAS_DDI(dev_priv))
1182                 return;
1183
1184         val = I915_READ(FDI_TX_CTL(pipe));
1185         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1186 }
1187
1188 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1189                        enum pipe pipe, bool state)
1190 {
1191         u32 val;
1192         bool cur_state;
1193
1194         val = I915_READ(FDI_RX_CTL(pipe));
1195         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1196         I915_STATE_WARN(cur_state != state,
1197              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1198                         onoff(state), onoff(cur_state));
1199 }
1200
/*
 * Warn if the panel power sequencer whose panel sits on @pipe still has
 * its registers locked while panel power is on (callers check this before
 * touching the DPLL, which the panel "protects").  Which PP instance and
 * which pipe the panel is on is worked out per platform: PCH parts use
 * the PP port-select field, VLV/CHV have a per-pipe PP block, and older
 * gens are assumed to use LVDS.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;

        /* DDI platforms are not handled here */
        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* map the PP port select to the pipe that port drives */
                switch (port_sel) {
                case PANEL_PORT_SELECT_LVDS:
                        intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPA:
                        intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPC:
                        intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPD:
                        intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
                        break;
                default:
                        MISSING_CASE(port_sel);
                        break;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* only LVDS panels are expected on these platforms */
                WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
                intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }

        /* "unlocked" means panel power off, or the unlock key written */
        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
1257
/*
 * Assert that @pipe is in the expected on/off @state.  PIPECONF is read
 * under the transcoder's power domain; if the domain cannot be grabbed
 * the pipe is treated as disabled.  On 830 both pipes are always kept
 * enabled, so the expected state is forced to true there.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain);
        } else {
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
1284
1285 static void assert_plane(struct intel_plane *plane, bool state)
1286 {
1287         enum pipe pipe;
1288         bool cur_state;
1289
1290         cur_state = plane->get_hw_state(plane, &pipe);
1291
1292         I915_STATE_WARN(cur_state != state,
1293                         "%s assertion failure (expected %s, current %s)\n",
1294                         plane->base.name, onoff(state), onoff(cur_state));
1295 }
1296
1297 #define assert_plane_enabled(p) assert_plane(p, true)
1298 #define assert_plane_disabled(p) assert_plane(p, false)
1299
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_plane *plane;

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
                assert_plane_disabled(plane);
}
1308
/*
 * Warn if vblank interrupts on @crtc are not disabled: a successful
 * drm_crtc_vblank_get() (returns 0) indicates vblanks could still be
 * obtained, and the reference it took is dropped again immediately.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1314
1315 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1316                                     enum pipe pipe)
1317 {
1318         u32 val;
1319         bool enabled;
1320
1321         val = I915_READ(PCH_TRANSCONF(pipe));
1322         enabled = !!(val & TRANS_ENABLE);
1323         I915_STATE_WARN(enabled,
1324              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1325              pipe_name(pipe));
1326 }
1327
/*
 * Warn if the PCH DP @port is enabled and driving @pipe.  On IBX also
 * warn if a disabled port is still left selecting transcoder B
 * (presumably an IBX-specific restriction — confirm against Bspec).
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, enum port port,
                                   i915_reg_t dp_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH DP %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH DP %c still using transcoder B\n",
                        port_name(port));
}
1345
/*
 * Warn if the PCH HDMI @port is enabled and driving @pipe.  Mirrors
 * assert_pch_dp_disabled(), including the IBX transcoder-B check.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum port port,
                                     i915_reg_t hdmi_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH HDMI %c still using transcoder B\n",
                        port_name(port));
}
1363
/*
 * Assert that no PCH port — DP B/C/D, VGA (CRT), LVDS, or HDMI B/C/D —
 * is enabled and driving @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        enum pipe port_pipe;

        assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

        I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH VGA enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1387
/*
 * Write the precomputed DPLL value for @pipe and poll until the PLL
 * reports lock.  The caller (vlv_enable_pll) has already verified the
 * pipe is off and the panel registers are unlocked.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        POSTING_READ(DPLL(pipe));
        /* let the clocks settle before checking for lock */
        udelay(150);

        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe),
                                    DPLL_LOCK_VLV,
                                    DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1405
/*
 * Enable the VLV DPLL for @crtc.  The core PLL is only programmed when
 * the precomputed state actually has the VCO enabled (e.g. DSI uses its
 * own PLL); DPLL_MD is written unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1423
1424
/*
 * Enable the CHV DPLL for @pipe: first turn the 10-bit clock back on via
 * the DPIO sideband (under sb_lock), wait the required >100ns, then write
 * the PLL enable and poll for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1456
/*
 * Enable the CHV DPLL for @crtc and program DPLL_MD.  Pipes B/C need the
 * WaPixelRepeatModeFixForC0 chicken-bit dance because their DPLL_MD
 * register is not functional; the value is routed through DPLLBMD
 * instead and cached in dev_priv->chv_dpll_md for later readout.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _chv_enable_pll(crtc, pipe_config);

        if (pipe != PIPE_A) {
                /*
                 * WaPixelRepeatModeFixForC0:chv
                 *
                 * DPLLCMD is AWOL. Use chicken bits to propagate
                 * the value from DPLLBMD to either pipe B or C.
                 */
                I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
                I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
                I915_WRITE(CBR4_VLV, 0);
                dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

                /*
                 * DPLLB VGA mode also seems to cause problems.
                 * We should always have it disabled.
                 */
                WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
        } else {
                I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
                POSTING_READ(DPLL_MD(pipe));
        }
}
1493
1494 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1495 {
1496         struct intel_crtc *crtc;
1497         int count = 0;
1498
1499         for_each_intel_crtc(&dev_priv->drm, crtc) {
1500                 count += crtc->base.state->active &&
1501                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1502         }
1503
1504         return count;
1505 }
1506
/*
 * Enable the gen2-4 DPLL for @crtc.  The write sequence matters: a zero
 * write first (VGA mode must be on before the P1/P2 dividers change), the
 * real value next, then either DPLL_MD (gen4+) or a rewrite of the same
 * value (the pixel multiplier only latches once the PLL is stable), and
 * finally three more rewrites "for luck".
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc_state->dpll_hw_state.dpll;
        int i;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /* Enable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
                /*
                 * It appears to be important that we don't enable this
                 * for the current pipe before otherwise configuring the
                 * PLL. No idea how this should be handled if multiple
                 * DVO outputs are enabled simultaneosly.
                 */
                dpll |= DPLL_DVO_2X_MODE;
                I915_WRITE(DPLL(!crtc->pipe),
                           I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
        }

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, 0);

        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc_state->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        for (i = 0; i < 3; i++) {
                I915_WRITE(reg, dpll);
                POSTING_READ(reg);
                udelay(150); /* wait for warmup */
        }
}
1566
/*
 * Disable the DPLL for @crtc on i9xx-style hardware. Also drops the DVO
 * 2x clock from both PLLs once the last DVO pipe goes away. On 830 the
 * PLLs are left running because both pipes must stay enabled there.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) &&
            intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev_priv)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
                I915_WRITE(DPLL(PIPE_A),
                           I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
        }

        /* Don't disable pipe or pipe PLLs if needed */
        if (IS_I830(dev_priv))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
1592
/*
 * Disable the VLV DPLL for @pipe, leaving the reference clock enabled.
 * For pipes other than A the CRI clock is kept running as well —
 * presumably required by the DPIO PHY; confirm against the VLV docs.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        val = DPLL_INTEGRATED_REF_CLK_VLV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));
}
1608
/*
 * Disable the CHV DPLL for @pipe, keeping the reference (and non-pipe-A
 * CRI) clocks alive, then turn off the 10-bit DPIO clock feeding the
 * display controller via the sideband bus.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 val;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        val = DPLL_SSC_REF_CLK_CHV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;

        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));

        /* sideband (DPIO) accesses are serialized by sb_lock */
        mutex_lock(&dev_priv->sb_lock);

        /* Disable 10bit clock to display controller */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

        mutex_unlock(&dev_priv->sb_lock);
}
1634
/*
 * Wait (up to 1 ms) for the PHY "port ready" status bits of @dport to
 * reach @expected_mask, warning on timeout. Ports B/C report through
 * DPLL(0); port D reports through DPIO_PHY_STATUS. Port C's bits sit
 * four positions above port B's, hence the shift of @expected_mask.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport,
                         unsigned int expected_mask)
{
        u32 port_mask;
        i915_reg_t dpll_reg;

        switch (dport->base.port) {
        case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
                dpll_reg = DPLL(0);
                break;
        case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
                dpll_reg = DPLL(0);
                expected_mask <<= 4;
                break;
        case PORT_D:
                port_mask = DPLL_PORTD_READY_MASK;
                dpll_reg = DPIO_PHY_STATUS;
                break;
        default:
                BUG();
        }

        if (intel_wait_for_register(dev_priv,
                                    dpll_reg, port_mask, expected_mask,
                                    1000))
                WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
                     port_name(dport->base.port),
                     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1667
1668 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1669                                            enum pipe pipe)
1670 {
1671         struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
1672                                                                 pipe);
1673         i915_reg_t reg;
1674         uint32_t val, pipeconf_val;
1675
1676         /* Make sure PCH DPLL is enabled */
1677         assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1678
1679         /* FDI must be feeding us bits for PCH ports */
1680         assert_fdi_tx_enabled(dev_priv, pipe);
1681         assert_fdi_rx_enabled(dev_priv, pipe);
1682
1683         if (HAS_PCH_CPT(dev_priv)) {
1684                 /* Workaround: Set the timing override bit before enabling the
1685                  * pch transcoder. */
1686                 reg = TRANS_CHICKEN2(pipe);
1687                 val = I915_READ(reg);
1688                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1689                 I915_WRITE(reg, val);
1690         }
1691
1692         reg = PCH_TRANSCONF(pipe);
1693         val = I915_READ(reg);
1694         pipeconf_val = I915_READ(PIPECONF(pipe));
1695
1696         if (HAS_PCH_IBX(dev_priv)) {
1697                 /*
1698                  * Make the BPC in transcoder be consistent with
1699                  * that in pipeconf reg. For HDMI we must use 8bpc
1700                  * here for both 8bpc and 12bpc.
1701                  */
1702                 val &= ~PIPECONF_BPC_MASK;
1703                 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1704                         val |= PIPECONF_8BPC;
1705                 else
1706                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1707         }
1708
1709         val &= ~TRANS_INTERLACE_MASK;
1710         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1711                 if (HAS_PCH_IBX(dev_priv) &&
1712                     intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1713                         val |= TRANS_LEGACY_INTERLACED_ILK;
1714                 else
1715                         val |= TRANS_INTERLACED;
1716         else
1717                 val |= TRANS_PROGRESSIVE;
1718
1719         I915_WRITE(reg, val | TRANS_ENABLE);
1720         if (intel_wait_for_register(dev_priv,
1721                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1722                                     100))
1723                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1724 }
1725
/*
 * Enable the single LPT PCH transcoder and wait for it to report
 * enabled. On LPT the transcoder is always fed from FDI RX on pipe A;
 * interlace mode is mirrored from the CPU transcoder's PIPECONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                      enum transcoder cpu_transcoder)
{
        u32 val, pipeconf_val;

        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, PIPE_A);

        /* Workaround: set timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

        val = TRANS_ENABLE;
        pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

        if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
            PIPECONF_INTERLACED_ILK)
                val |= TRANS_INTERLACED;
        else
                val |= TRANS_PROGRESSIVE;

        I915_WRITE(LPT_TRANSCONF, val);
        if (intel_wait_for_register(dev_priv,
                                    LPT_TRANSCONF,
                                    TRANS_STATE_ENABLE,
                                    TRANS_STATE_ENABLE,
                                    100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
}
1757
/*
 * Disable the ILK/CPT PCH transcoder for @pipe and wait (up to 50 ms)
 * for it to report disabled. FDI and the PCH ports must already be off
 * (asserted below). On CPT the timing-override chicken bit set during
 * enable is cleared again afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
{
        i915_reg_t reg;
        uint32_t val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = PCH_TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(dev_priv,
                                    reg, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

        if (HAS_PCH_CPT(dev_priv)) {
                /* Workaround: Clear the timing override chicken bit again. */
                reg = TRANS_CHICKEN2(pipe);
                val = I915_READ(reg);
                val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
                I915_WRITE(reg, val);
        }
}
1789
/*
 * Disable the single LPT PCH transcoder, wait (up to 50 ms) for it to
 * report disabled, then clear the timing-override workaround bit that
 * was set when it was enabled.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(LPT_TRANSCONF);
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
        if (intel_wait_for_register(dev_priv,
                                    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
                                    50))
                DRM_ERROR("Failed to disable PCH transcoder\n");

        /* Workaround: clear timing override bit. */
        val = I915_READ(TRANS_CHICKEN2(PIPE_A));
        val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
        I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1808
1809 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1810 {
1811         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1812
1813         if (HAS_PCH_LPT(dev_priv))
1814                 return PIPE_A;
1815         else
1816                 return crtc->pipe;
1817 }
1818
/*
 * Enable the CPU pipe described by @new_crtc_state. Planes must already
 * be disabled. Asserts that the clock source feeding the pipe (DSI PLL,
 * pipe PLL, or FDI PLLs when driving the PCH) is up before flipping
 * PIPECONF_ENABLE.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

        assert_planes_disabled(crtc);

        /*
         * A pipe without a PLL won't actually be able to drive bits from
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
        if (HAS_GMCH_DISPLAY(dev_priv)) {
                if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
        } else {
                if (new_crtc_state->has_pch_encoder) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv,
                                                  intel_crtc_pch_transcoder(crtc));
                        assert_fdi_tx_pll_enabled(dev_priv,
                                                  (enum pipe) cpu_transcoder);
                }
                /* FIXME: assert CPU port conditions for SNB+ */
        }

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if (val & PIPECONF_ENABLE) {
                /* we keep both pipes enabled on 830 */
                WARN_ON(!IS_I830(dev_priv));
                return;
        }

        I915_WRITE(reg, val | PIPECONF_ENABLE);
        POSTING_READ(reg);

        /*
         * Until the pipe starts PIPEDSL reads will return a stale value,
         * which causes an apparent vblank timestamp jump when PIPEDSL
         * resets to its proper value. That also messes up the frame count
         * when it's derived from the timestamps. So let's wait for the
         * pipe to start properly before we call drm_crtc_vblank_on()
         */
        if (dev_priv->drm.max_vblank_count == 0)
                intel_wait_for_pipe_scanline_moving(crtc);
}
1874
/*
 * Disable the CPU pipe described by @old_crtc_state and wait for it to
 * actually turn off. Planes must already be disabled. On 830 the pipe
 * is left enabled (both pipes must stay on there), but double-wide mode
 * is still cleared.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        u32 val;

        DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

        /*
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
        assert_planes_disabled(crtc);

        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if ((val & PIPECONF_ENABLE) == 0)
                return;

        /*
         * Double wide has implications for planes
         * so best keep it disabled when not needed.
         */
        if (old_crtc_state->double_wide)
                val &= ~PIPECONF_DOUBLE_WIDE;

        /* Don't disable pipe or pipe PLLs if needed */
        if (!IS_I830(dev_priv))
                val &= ~PIPECONF_ENABLE;

        I915_WRITE(reg, val);
        /* only wait for off when we actually cleared the enable bit */
        if ((val & PIPECONF_ENABLE) == 0)
                intel_wait_for_pipe_off(old_crtc_state);
}
1912
/* Size in bytes of one GTT tile page: 2 KiB on gen2, 4 KiB elsewhere. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
        if (IS_GEN2(dev_priv))
                return 2048;

        return 4096;
}
1917
/*
 * Return the width in bytes of one tile row for @color_plane of @fb,
 * based on the fb's tiling modifier, the platform and the plane's cpp.
 * For linear buffers the "tile" width is a single pixel (cpp bytes).
 * CCS (color plane 1) surfaces always use 128-byte tiles.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                return cpp;
        case I915_FORMAT_MOD_X_TILED:
                if (IS_GEN2(dev_priv))
                        return 128;
                else
                        return 512;
        case I915_FORMAT_MOD_Y_TILED_CCS:
                if (color_plane == 1)
                        return 128;
                /* fall through */
        case I915_FORMAT_MOD_Y_TILED:
                if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
                        return 128;
                else
                        return 512;
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                if (color_plane == 1)
                        return 128;
                /* fall through */
        case I915_FORMAT_MOD_Yf_TILED:
                /* Yf tile width depends on the bytes per pixel */
                switch (cpp) {
                case 1:
                        return 64;
                case 2:
                case 4:
                        return 128;
                case 8:
                case 16:
                        return 256;
                default:
                        MISSING_CASE(cpp);
                        return cpp;
                }
                break;
        default:
                MISSING_CASE(fb->modifier);
                return cpp;
        }
}
1965
1966 static unsigned int
1967 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1968 {
1969         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1970                 return 1;
1971         else
1972                 return intel_tile_size(to_i915(fb->dev)) /
1973                         intel_tile_width_bytes(fb, color_plane);
1974 }
1975
1976 /* Return the tile dimensions in pixel units */
1977 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1978                             unsigned int *tile_width,
1979                             unsigned int *tile_height)
1980 {
1981         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1982         unsigned int cpp = fb->format->cpp[color_plane];
1983
1984         *tile_width = tile_width_bytes / cpp;
1985         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1986 }
1987
/* Round @height up to a whole number of tile rows for @color_plane of @fb. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
                      int color_plane, unsigned int height)
{
        return ALIGN(height, intel_tile_height(fb, color_plane));
}
1996
1997 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1998 {
1999         unsigned int size = 0;
2000         int i;
2001
2002         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2003                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2004
2005         return size;
2006 }
2007
/*
 * Initialize @view for scanning out @fb: a normal GGTT view for 0/180
 * degree rotation, or the fb's precomputed rotated view for 90/270.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
                        const struct drm_framebuffer *fb,
                        unsigned int rotation)
{
        view->type = I915_GGTT_VIEW_NORMAL;
        if (drm_rotation_90_or_270(rotation)) {
                view->type = I915_GGTT_VIEW_ROTATED;
                view->rotated = to_intel_framebuffer(fb)->rot_info;
        }
}
2019
/* Required GGTT alignment for cursor surfaces, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
        unsigned int alignment;

        if (IS_I830(dev_priv))
                alignment = 16 * 1024;
        else if (IS_I85X(dev_priv))
                alignment = 256;
        else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
                alignment = 32;
        else
                alignment = 4 * 1024;

        return alignment;
}
2031
/* Required GGTT alignment for linear scanout surfaces, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
        unsigned int alignment;

        if (INTEL_GEN(dev_priv) >= 9)
                alignment = 256 * 1024;
        else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
                 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                alignment = 128 * 1024;
        else if (INTEL_GEN(dev_priv) >= 4)
                alignment = 4 * 1024;
        else
                alignment = 0;

        return alignment;
}
2044
/*
 * Required GGTT alignment for scanning out @color_plane of @fb,
 * depending on the tiling modifier. CCS aux planes (plane 1) only
 * need 4K alignment regardless of modifier.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
                                         int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);

        /* AUX_DIST needs only 4K alignment */
        if (color_plane == 1)
                return 4096;

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                return intel_linear_alignment(dev_priv);
        case I915_FORMAT_MOD_X_TILED:
                if (INTEL_GEN(dev_priv) >= 9)
                        return 256 * 1024;
                return 0;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                return 1 * 1024 * 1024;
        default:
                MISSING_CASE(fb->modifier);
                return 0;
        }
}
2071
2072 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2073 {
2074         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2075         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2076
2077         return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2078 }
2079
/*
 * Pin @fb's backing object into the GGTT for scanout through @view and,
 * when @uses_fence allows it, install a fence register for tiled
 * scan-out. On success returns the pinned vma with an extra reference;
 * PLANE_HAS_FENCE is OR'ed into *@out_flags when a fence was taken.
 * Returns an ERR_PTR on failure. Caller must hold struct_mutex and
 * later release via intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                           const struct i915_ggtt_view *view,
                           bool uses_fence,
                           unsigned long *out_flags)
{
        struct drm_device *dev = fb->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_vma *vma;
        unsigned int pinctl;
        u32 alignment;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        alignment = intel_surf_alignment(fb, 0);

        /* Note that the w/a also requires 64 PTE of padding following the
         * bo. We currently fill all unused PTE with the shadow page and so
         * we should always have valid PTE following the scanout preventing
         * the VT-d warning.
         */
        if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
                alignment = 256 * 1024;

        /*
         * Global gtt pte registers are special registers which actually forward
         * writes to a chunk of system memory. Which means that there is no risk
         * that the register values disappear as soon as we call
         * intel_runtime_pm_put(), so it is correct to wrap only the
         * pin/unpin/fence and not more.
         */
        intel_runtime_pm_get(dev_priv);

        atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

        pinctl = 0;

        /* Valleyview is definitely limited to scanning out the first
         * 512MiB. Lets presume this behaviour was inherited from the
         * g4x display engine and that all earlier gen are similarly
         * limited. Testing suggests that it is a little more
         * complicated than this. For example, Cherryview appears quite
         * happy to scanout from anywhere within its global aperture.
         */
        if (HAS_GMCH_DISPLAY(dev_priv))
                pinctl |= PIN_MAPPABLE;

        vma = i915_gem_object_pin_to_display_plane(obj,
                                                   alignment, view, pinctl);
        if (IS_ERR(vma))
                goto err;

        if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
                int ret;

                /* Install a fence for tiled scan-out. Pre-i965 always needs a
                 * fence, whereas 965+ only requires a fence if using
                 * framebuffer compression.  For simplicity, we always, when
                 * possible, install a fence as the cost is not that onerous.
                 *
                 * If we fail to fence the tiled scanout, then either the
                 * modeset will reject the change (which is highly unlikely as
                 * the affected systems, all but one, do not have unmappable
                 * space) or we will not be able to enable full powersaving
                 * techniques (also likely not to apply due to various limits
                 * FBC and the like impose on the size of the buffer, which
                 * presumably we violated anyway with this unmappable buffer).
                 * Anyway, it is presumably better to stumble onwards with
                 * something and try to run the system in a "less than optimal"
                 * mode that matches the user configuration.
                 */
                ret = i915_vma_pin_fence(vma);
                /* fence failure is fatal only on gens that require a fence */
                if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
                        i915_gem_object_unpin_from_display_plane(vma);
                        vma = ERR_PTR(ret);
                        goto err;
                }

                if (ret == 0 && vma->fence)
                        *out_flags |= PLANE_HAS_FENCE;
        }

        i915_vma_get(vma);
err:
        atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

        intel_runtime_pm_put(dev_priv);
        return vma;
}
2170
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if @flags says
 * one was taken), unpin from the display plane and drop the vma
 * reference. Caller must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        if (flags & PLANE_HAS_FENCE)
                i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
        i915_vma_put(vma);
}
2180
2181 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2182                           unsigned int rotation)
2183 {
2184         if (drm_rotation_90_or_270(rotation))
2185                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2186         else
2187                 return fb->pitches[color_plane];
2188 }
2189
2190 /*
2191  * Convert the x/y offsets into a linear offset.
2192  * Only valid with 0/180 degree rotation, which is fine since linear
2193  * offset is only used with linear buffers on pre-hsw and tiled buffers
2194  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2195  */
/*
 * Convert plane-relative x/y coordinates to a linear byte offset.
 * NOTE(review): y * pitch + x * cpp is evaluated in 32-bit arithmetic;
 * this relies on framebuffer offsets fitting in 4 GiB — confirm the fb
 * size limits enforced at framebuffer creation.
 */
u32 intel_fb_xy_to_linear(int x, int y,
                          const struct intel_plane_state *state,
                          int color_plane)
{
        const struct drm_framebuffer *fb = state->base.fb;
        unsigned int cpp = fb->format->cpp[color_plane];
        unsigned int pitch = state->color_plane[color_plane].stride;

        return y * pitch + x * cpp;
}
2206
2207 /*
2208  * Add the x/y offsets derived from fb->offsets[] to the user
2209  * specified plane src x/y offsets. The resulting x/y offsets
2210  * specify the start of scanout from the beginning of the gtt mapping.
2211  */
2212 void intel_add_fb_offsets(int *x, int *y,
2213                           const struct intel_plane_state *state,
2214                           int color_plane)
2215
2216 {
2217         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2218         unsigned int rotation = state->base.rotation;
2219
2220         if (drm_rotation_90_or_270(rotation)) {
2221                 *x += intel_fb->rotated[color_plane].x;
2222                 *y += intel_fb->rotated[color_plane].y;
2223         } else {
2224                 *x += intel_fb->normal[color_plane].x;
2225                 *y += intel_fb->normal[color_plane].y;
2226         }
2227 }
2228
/*
 * Rebase a tile-aligned offset from @old_offset to @new_offset by
 * folding the (tile-sized) difference into the *x/*y coordinates.
 * Both offsets must be tile-size aligned and new_offset <= old_offset.
 * Returns @new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
                                    unsigned int tile_width,
                                    unsigned int tile_height,
                                    unsigned int tile_size,
                                    unsigned int pitch_tiles,
                                    u32 old_offset,
                                    u32 new_offset)
{
        unsigned int pitch_pixels = pitch_tiles * tile_width;
        unsigned int tiles;

        WARN_ON(old_offset & (tile_size - 1));
        WARN_ON(new_offset & (tile_size - 1));
        WARN_ON(new_offset > old_offset);

        /* number of whole tiles between the two offsets */
        tiles = (old_offset - new_offset) / tile_size;

        *y += tiles / pitch_tiles * tile_height;
        *x += tiles % pitch_tiles * tile_width;

        /* minimize x in case it got needlessly big */
        *y += *x / pitch_pixels * tile_height;
        *x %= pitch_pixels;

        return new_offset;
}
2255
/*
 * Rebase an aligned plane offset from @old_offset to @new_offset,
 * folding the difference into *x/*y. Handles both tiled layouts (via
 * intel_adjust_tile_offset()) and linear layouts (plain byte math).
 * @pitch is in bytes for linear/0-180, in tile rows for 90/270.
 * Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
                                       const struct drm_framebuffer *fb,
                                       int color_plane,
                                       unsigned int rotation,
                                       unsigned int pitch,
                                       u32 old_offset, u32 new_offset)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int cpp = fb->format->cpp[color_plane];

        WARN_ON(new_offset > old_offset);

        if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        /* rotated view: pitch is expressed in tile rows */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         old_offset, new_offset);
        } else {
                /* linear: convert the delta straight back to x/y */
                old_offset += *y * pitch + *x * cpp;

                *y = (old_offset - new_offset) / pitch;
                *x = ((old_offset - new_offset) - *y * pitch) / cpp;
        }

        return new_offset;
}
2294
2295 /*
2296  * Adjust the tile offset by moving the difference into
2297  * the x/y offsets.
2298  */
/*
 * Convenience wrapper around intel_adjust_aligned_offset() that pulls
 * fb, rotation and stride out of the plane @state.
 */
static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
                                             const struct intel_plane_state *state,
                                             int color_plane,
                                             u32 old_offset, u32 new_offset)
{
        return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
                                           state->base.rotation,
                                           state->color_plane[color_plane].stride,
                                           old_offset, new_offset);
}
2309
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	uint64_t fb_modifier = fb->modifier;
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/*
	 * Turn the power-of-two alignment into a mask. alignment == 0
	 * means "no alignment", so the mask must stay 0 in that case.
	 */
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* pitch is the tile-aligned fb height here (see above) */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split y into whole tile rows + intra-tile remainder. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		/* Same for x: whole tiles + intra-tile remainder. */
		tiles = *x / tile_width;
		*x %= tile_width;

		/* Byte offset of the tile containing (x,y). */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Fold the offset-vs-aligned difference back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		/* Linear: plain byte addressing. */
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* The bits masked off become the new intra-surface x/y. */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2375
2376 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2377                                               const struct intel_plane_state *state,
2378                                               int color_plane)
2379 {
2380         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2381         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2382         const struct drm_framebuffer *fb = state->base.fb;
2383         unsigned int rotation = state->base.rotation;
2384         int pitch = state->color_plane[color_plane].stride;
2385         u32 alignment;
2386
2387         if (intel_plane->id == PLANE_CURSOR)
2388                 alignment = intel_cursor_alignment(dev_priv);
2389         else
2390                 alignment = intel_surf_alignment(fb, color_plane);
2391
2392         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2393                                             pitch, rotation, alignment);
2394 }
2395
2396 /* Convert the fb->offset[] into x/y offsets */
2397 static int intel_fb_offset_to_xy(int *x, int *y,
2398                                  const struct drm_framebuffer *fb,
2399                                  int color_plane)
2400 {
2401         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2402
2403         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2404             fb->offsets[color_plane] % intel_tile_size(dev_priv))
2405                 return -EINVAL;
2406
2407         *x = 0;
2408         *y = 0;
2409
2410         intel_adjust_aligned_offset(x, y,
2411                                     fb, color_plane, DRM_MODE_ROTATE_0,
2412                                     fb->pitches[color_plane],
2413                                     fb->offsets[color_plane], 0);
2414
2415         return 0;
2416 }
2417
2418 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2419 {
2420         switch (fb_modifier) {
2421         case I915_FORMAT_MOD_X_TILED:
2422                 return I915_TILING_X;
2423         case I915_FORMAT_MOD_Y_TILED:
2424         case I915_FORMAT_MOD_Y_TILED_CCS:
2425                 return I915_TILING_Y;
2426         default:
2427                 return I915_TILING_NONE;
2428         }
2429 }
2430
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
/* 2-plane format infos for CCS framebuffers: plane 0 is the 4 bpp main
 * surface, plane 1 the 1 bpp AUX/CCS surface at 8x16 subsampling. */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2451
2452 static const struct drm_format_info *
2453 lookup_format_info(const struct drm_format_info formats[],
2454                    int num_formats, u32 format)
2455 {
2456         int i;
2457
2458         for (i = 0; i < num_formats; i++) {
2459                 if (formats[i].format == format)
2460                         return &formats[i];
2461         }
2462
2463         return NULL;
2464 }
2465
2466 static const struct drm_format_info *
2467 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2468 {
2469         switch (cmd->modifier[0]) {
2470         case I915_FORMAT_MOD_Y_TILED_CCS:
2471         case I915_FORMAT_MOD_Yf_TILED_CCS:
2472                 return lookup_format_info(ccs_formats,
2473                                           ARRAY_SIZE(ccs_formats),
2474                                           cmd->pixel_format);
2475         default:
2476                 return NULL;
2477         }
2478 }
2479
2480 bool is_ccs_modifier(u64 modifier)
2481 {
2482         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2483                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2484 }
2485
/*
 * Precompute the derived layout information for @fb: the normal-view
 * x/y offsets of each color plane, the rotated-view offsets and the
 * rotation_info used to build the 90/270 GTT view, and validate that
 * the framebuffer (including CCS constraints) fits in the backing
 * object. Returns 0 on success, negative errno on a bad layout.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Convert the user-supplied byte offset into x/y. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* Plane 1 of a CCS fb is the AUX surface; validate it. */
		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			intel_tile_dims(fb, i, &tile_width, &tile_height);
			/* Scale CCS tile dims to main-surface pixels. */
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Tile-align the offset; remainder goes back into x/y. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		/* From here on, offset is counted in whole tiles. */
		offset /= tile_size;

		if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* Linear: size in tiles of the last byte used. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* 64-bit multiply: an fb extending past 4 GiB must not wrap. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2650
2651 static int i9xx_format_to_fourcc(int format)
2652 {
2653         switch (format) {
2654         case DISPPLANE_8BPP:
2655                 return DRM_FORMAT_C8;
2656         case DISPPLANE_BGRX555:
2657                 return DRM_FORMAT_XRGB1555;
2658         case DISPPLANE_BGRX565:
2659                 return DRM_FORMAT_RGB565;
2660         default:
2661         case DISPPLANE_BGRX888:
2662                 return DRM_FORMAT_XRGB8888;
2663         case DISPPLANE_RGBX888:
2664                 return DRM_FORMAT_XBGR8888;
2665         case DISPPLANE_BGRX101010:
2666                 return DRM_FORMAT_XRGB2101010;
2667         case DISPPLANE_RGBX101010:
2668                 return DRM_FORMAT_XBGR2101010;
2669         }
2670 }
2671
2672 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2673 {
2674         switch (format) {
2675         case PLANE_CTL_FORMAT_RGB_565:
2676                 return DRM_FORMAT_RGB565;
2677         case PLANE_CTL_FORMAT_NV12:
2678                 return DRM_FORMAT_NV12;
2679         default:
2680         case PLANE_CTL_FORMAT_XRGB_8888:
2681                 if (rgb_order) {
2682                         if (alpha)
2683                                 return DRM_FORMAT_ABGR8888;
2684                         else
2685                                 return DRM_FORMAT_XBGR8888;
2686                 } else {
2687                         if (alpha)
2688                                 return DRM_FORMAT_ARGB8888;
2689                         else
2690                                 return DRM_FORMAT_XRGB8888;
2691                 }
2692         case PLANE_CTL_FORMAT_XRGB_2101010:
2693                 if (rgb_order)
2694                         return DRM_FORMAT_XBGR2101010;
2695                 else
2696                         return DRM_FORMAT_XRGB2101010;
2697         }
2698 }
2699
/*
 * Try to wrap the BIOS/GOP-programmed scanout buffer (described by
 * @plane_config) in a GEM object + framebuffer so the boot image can
 * be taken over flicker-free. The buffer lives in stolen memory at
 * plane_config->base. Returns true if the fb was initialized.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* Expand [base, base + size) to whole pages. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	/* Wrap the preallocated stolen range; struct_mutex guards GEM state. */
	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	/* Mirror the tiling the BIOS programmed onto the object. */
	if (plane_config->tiling == I915_TILING_X)
		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	i915_gem_object_put(obj);
	return false;
}
2756
2757 static void
2758 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2759                         struct intel_plane_state *plane_state,
2760                         bool visible)
2761 {
2762         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2763
2764         plane_state->base.visible = visible;
2765
2766         /* FIXME pre-g4x don't work like this */
2767         if (visible) {
2768                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2769                 crtc_state->active_planes |= BIT(plane->id);
2770         } else {
2771                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2772                 crtc_state->active_planes &= ~BIT(plane->id);
2773         }
2774
2775         DRM_DEBUG_KMS("%s active planes 0x%x\n",
2776                       crtc_state->base.crtc->name,
2777                       crtc_state->active_planes);
2778 }
2779
/*
 * Disable @plane outside the atomic framework (used during initial
 * hw readout/takeover). Updates the software state to invisible,
 * runs the primary-plane pre-disable hooks, and writes the hardware.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	intel_set_plane_visible(crtc_state, plane_state, false);

	/* Pre-disable work (e.g. FBC/IPS teardown) must precede the disable. */
	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	trace_intel_disable_plane(&plane->base, crtc);
	plane->disable_plane(plane, crtc);
}
2796
/*
 * Take over the BIOS-programmed framebuffer for @intel_crtc's primary
 * plane: wrap it in a GEM object, or share another CRTC's fb at the
 * same GGTT address; if neither works, disable the plane so software
 * state doesn't claim a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	/* Nothing to take over if readout found no fb. */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address => same BIOS fb; share it. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	/* Pin the fb into the GGTT so scanout keeps working. */
	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	obj = intel_fb_obj(fb);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	/* Populate the plane state as a full-screen 1:1 scanout of @fb. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	/* Keep the BIOS swizzle setup if it tiled the boot fb. */
	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	intel_set_plane_visible(to_intel_crtc_state(crtc_state),
				to_intel_plane_state(plane_state),
				true);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
2909
/*
 * Maximum source width (in pixels) a SKL+ plane supports for the
 * given fb modifier and bytes-per-pixel. Unknown modifiers/cpp fall
 * back to the conservative 2048 limit.
 */
static int skl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		switch (cpp) {
		case 8:
			return 4096;
		case 4:
		case 2:
		case 1:
			return 8192;
		default:
			MISSING_CASE(cpp);
			break;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* FIXME AUX plane? */
		/* fall through - CCS shares the Y/Yf tiled limits */
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 8:
			return 2048;
		case 4:
			return 4096;
		case 2:
		case 1:
			return 8192;
		default:
			MISSING_CASE(cpp);
			break;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
	}

	/* Conservative default for anything we didn't recognize. */
	return 2048;
}
2955
/*
 * Try to line up the CCS AUX surface's x/y offsets with the main
 * surface's (main_x, main_y, main_offset) by walking the AUX offset
 * backwards one alignment step at a time (folding the difference into
 * the AUX x/y). The AUX plane has no x/y offset registers of its own,
 * so the intra-tile offsets must match exactly.
 *
 * On success updates plane_state->color_plane[1] and returns true;
 * returns false if no matching AUX placement exists.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	/* AUX offset must stay >= main offset, and y can only grow. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		/* Already aligned with the main surface? */
		if (aux_x == main_x && aux_y == main_y)
			break;

		/* Can't back up any further. */
		if (aux_offset == 0)
			break;

		/* Work in AUX-plane (subsampled) coordinates. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		/* Back to main-surface pixels, keeping the sub-sample remainder. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
2993
/*
 * Validate and finalize the main (color plane 0) surface layout for a
 * SKL+ plane: check size limits, compute an aligned surface offset,
 * and massage the offset/x/y to satisfy the AUX-offset, X-tiling and
 * CCS coordinate constraints. Fills plane_state->color_plane[0] on
 * success; returns 0 or -EINVAL.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	/* src coordinates are 16.16 fixed point; take the integer part. */
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset back until x fits within the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	return 0;
}
3070
3071 static int
3072 skl_check_nv12_surface(struct intel_plane_state *plane_state)
3073 {
3074         /* Display WA #1106 */
3075         if (plane_state->base.rotation !=
3076             (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3077             plane_state->base.rotation != DRM_MODE_ROTATE_270)
3078                 return 0;
3079
3080         /*
3081          * src coordinates are rotated here.
3082          * We check height but report it as width
3083          */
3084         if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3085                 DRM_DEBUG_KMS("src width must be multiple "
3086                               "of 4 for rotated NV12\n");
3087                 return -EINVAL;
3088         }
3089
3090         return 0;
3091 }
3092
/*
 * Compute and validate the CbCr (AUX) surface offset/coordinates for an
 * NV12 framebuffer. Returns 0 on success, -EINVAL if the chroma source
 * size exceeds the hardware limits.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	/*
	 * src coordinates are 16.16 fixed point; ">> 17" is ">> 16"
	 * (to integer) combined with "/ 2" for the 2x2 subsampled
	 * NV12 CbCr plane.
	 */
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	/* Fold in the fb offsets, then compute the aligned surface offset. */
	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3121
3122 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3123 {
3124         const struct drm_framebuffer *fb = plane_state->base.fb;
3125         int src_x = plane_state->base.src.x1 >> 16;
3126         int src_y = plane_state->base.src.y1 >> 16;
3127         int hsub = fb->format->hsub;
3128         int vsub = fb->format->vsub;
3129         int x = src_x / hsub;
3130         int y = src_y / vsub;
3131         u32 offset;
3132
3133         intel_add_fb_offsets(&x, &y, plane_state, 1);
3134         offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3135
3136         plane_state->color_plane[1].offset = offset;
3137         plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3138         plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3139
3140         return 0;
3141 }
3142
/*
 * Validate and finalize the per-color-plane stride/offset/coordinates
 * for a SKL+ plane. The AUX surface (NV12 chroma or CCS) is handled
 * before the main surface because the main surface setup reads the AUX
 * results. Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
	plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);

	/* Nothing more to compute for invisible planes. */
	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_MODE_ROTATE_270);

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (fb->format->format == DRM_FORMAT_NV12) {
		ret = skl_check_nv12_surface(plane_state);
		if (ret)
			return ret;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/*
		 * No AUX surface: poison the plane 1 offset so accidental
		 * use stands out. NOTE(review): presumably ~0xfff is an
		 * intentionally invalid offset value -- confirm against
		 * the PLANE_AUX_DIST register layout.
		 */
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
3189
3190 unsigned int
3191 i9xx_plane_max_stride(struct intel_plane *plane,
3192                       u32 pixel_format, u64 modifier,
3193                       unsigned int rotation)
3194 {
3195         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3196
3197         if (!HAS_GMCH_DISPLAY(dev_priv)) {
3198                 return 32*1024;
3199         } else if (INTEL_GEN(dev_priv) >= 4) {
3200                 if (modifier == I915_FORMAT_MOD_X_TILED)
3201                         return 16*1024;
3202                 else
3203                         return 32*1024;
3204         } else if (INTEL_GEN(dev_priv) >= 3) {
3205                 if (modifier == I915_FORMAT_MOD_X_TILED)
3206                         return 8*1024;
3207                 else
3208                         return 16*1024;
3209         } else {
3210                 if (plane->i9xx_plane == PLANE_C)
3211                         return 4*1024;
3212                 else
3213                         return 8*1024;
3214         }
3215 }
3216
3217 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3218                           const struct intel_plane_state *plane_state)
3219 {
3220         struct drm_i915_private *dev_priv =
3221                 to_i915(plane_state->base.plane->dev);
3222         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3223         const struct drm_framebuffer *fb = plane_state->base.fb;
3224         unsigned int rotation = plane_state->base.rotation;
3225         u32 dspcntr;
3226
3227         dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
3228
3229         if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
3230             IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
3231                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3232
3233         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3234                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3235
3236         if (INTEL_GEN(dev_priv) < 5)
3237                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3238
3239         switch (fb->format->format) {
3240         case DRM_FORMAT_C8:
3241                 dspcntr |= DISPPLANE_8BPP;
3242                 break;
3243         case DRM_FORMAT_XRGB1555:
3244                 dspcntr |= DISPPLANE_BGRX555;
3245                 break;
3246         case DRM_FORMAT_RGB565:
3247                 dspcntr |= DISPPLANE_BGRX565;
3248                 break;
3249         case DRM_FORMAT_XRGB8888:
3250                 dspcntr |= DISPPLANE_BGRX888;
3251                 break;
3252         case DRM_FORMAT_XBGR8888:
3253                 dspcntr |= DISPPLANE_RGBX888;
3254                 break;
3255         case DRM_FORMAT_XRGB2101010:
3256                 dspcntr |= DISPPLANE_BGRX101010;
3257                 break;
3258         case DRM_FORMAT_XBGR2101010:
3259                 dspcntr |= DISPPLANE_RGBX101010;
3260                 break;
3261         default:
3262                 MISSING_CASE(fb->format->format);
3263                 return 0;
3264         }
3265
3266         if (INTEL_GEN(dev_priv) >= 4 &&
3267             fb->modifier == I915_FORMAT_MOD_X_TILED)
3268                 dspcntr |= DISPPLANE_TILED;
3269
3270         if (rotation & DRM_MODE_ROTATE_180)
3271                 dspcntr |= DISPPLANE_ROTATE_180;
3272
3273         if (rotation & DRM_MODE_REFLECT_X)
3274                 dspcntr |= DISPPLANE_MIRROR;
3275
3276         return dspcntr;
3277 }
3278
/*
 * Compute the final surface offset and x/y coordinates for a pre-SKL
 * primary plane. Always returns 0.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	/* src coordinates are 16.16 fixed point; convert to integers. */
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	u32 offset;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/*
	 * Pre-gen4 has no surface offset register; the linear offset is
	 * used instead (see i9xx_update_plane()).
	 */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/*
		 * Bias the start coordinates to the last pixel (180
		 * rotation) or the end of the first line (X mirror).
		 * NOTE(review): scanout-direction semantics inferred from
		 * the rotation flags -- confirm against the PRM.
		 */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3319
3320 static int
3321 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3322                  struct intel_plane_state *plane_state)
3323 {
3324         int ret;
3325
3326         ret = chv_plane_check_rotation(plane_state);
3327         if (ret)
3328                 return ret;
3329
3330         ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3331                                                   &crtc_state->base,
3332                                                   DRM_PLANE_HELPER_NO_SCALING,
3333                                                   DRM_PLANE_HELPER_NO_SCALING,
3334                                                   false, true);
3335         if (ret)
3336                 return ret;
3337
3338         if (!plane_state->base.visible)
3339                 return 0;
3340
3341         ret = intel_plane_check_src_coordinates(plane_state);
3342         if (ret)
3343                 return ret;
3344
3345         ret = i9xx_check_plane_surface(plane_state);
3346         if (ret)
3347                 return ret;
3348
3349         plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3350
3351         return 0;
3352 }
3353
/*
 * Program a pre-SKL primary plane from the precomputed plane state.
 * All register writes happen under the uncore lock using the raw _FW
 * accessors; the write order (control, stride, then surface/offset
 * registers) is preserved as-is.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(i9xx_plane);
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	unsigned long irqflags;
	u32 dspaddr_offset;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen4+ uses the aligned surface offset; older gens scan out linearly. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own size/pos/alpha regs. */
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
	} else {
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	}
	/* Flush the posted writes before dropping the lock. */
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3416
3417 static void i9xx_disable_plane(struct intel_plane *plane,
3418                                struct intel_crtc *crtc)
3419 {
3420         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3421         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3422         unsigned long irqflags;
3423
3424         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3425
3426         I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
3427         if (INTEL_GEN(dev_priv) >= 4)
3428                 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3429         else
3430                 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3431         POSTING_READ_FW(DSPCNTR(i9xx_plane));
3432
3433         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3434 }
3435
/*
 * Read out the current hardware state of the plane: returns whether it
 * is enabled and stores the pipe it is attached to in @pipe. Returns
 * false if the relevant power domain is powered down.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Pre-gen5 planes report their pipe assignment via DSPCNTR. */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
3468
3469 static u32
3470 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3471 {
3472         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3473                 return 64;
3474         else
3475                 return intel_tile_width_bytes(fb, color_plane);
3476 }
3477
3478 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3479 {
3480         struct drm_device *dev = intel_crtc->base.dev;
3481         struct drm_i915_private *dev_priv = to_i915(dev);
3482
3483         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3484         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3485         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3486 }
3487
3488 /*
3489  * This function detaches (aka. unbinds) unused scalers in hardware
3490  */
3491 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
3492 {
3493         struct intel_crtc_scaler_state *scaler_state;
3494         int i;
3495
3496         scaler_state = &intel_crtc->config->scaler_state;
3497
3498         /* loop through and disable scalers that aren't in use */
3499         for (i = 0; i < intel_crtc->num_scalers; i++) {
3500                 if (!scaler_state->scalers[i].in_use)
3501                         skl_detach_scaler(intel_crtc, i);
3502         }
3503 }
3504
3505 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3506                      int color_plane)
3507 {
3508         const struct drm_framebuffer *fb = plane_state->base.fb;
3509         unsigned int rotation = plane_state->base.rotation;
3510         u32 stride = plane_state->color_plane[color_plane].stride;
3511
3512         if (color_plane >= fb->format->num_planes)
3513                 return 0;
3514
3515         /*
3516          * The stride is either expressed as a multiple of 64 bytes chunks for
3517          * linear buffers or in number of tiles for tiled buffers.
3518          */
3519         if (drm_rotation_90_or_270(rotation))
3520                 stride /= intel_tile_height(fb, color_plane);
3521         else
3522                 stride /= intel_fb_stride_alignment(fb, color_plane);
3523
3524         return stride;
3525 }
3526
3527 static u32 skl_plane_ctl_format(uint32_t pixel_format)
3528 {
3529         switch (pixel_format) {
3530         case DRM_FORMAT_C8:
3531                 return PLANE_CTL_FORMAT_INDEXED;
3532         case DRM_FORMAT_RGB565:
3533                 return PLANE_CTL_FORMAT_RGB_565;
3534         case DRM_FORMAT_XBGR8888:
3535         case DRM_FORMAT_ABGR8888:
3536                 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3537         case DRM_FORMAT_XRGB8888:
3538         case DRM_FORMAT_ARGB8888:
3539                 return PLANE_CTL_FORMAT_XRGB_8888;
3540         case DRM_FORMAT_XRGB2101010:
3541                 return PLANE_CTL_FORMAT_XRGB_2101010;
3542         case DRM_FORMAT_XBGR2101010:
3543                 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3544         case DRM_FORMAT_YUYV:
3545                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3546         case DRM_FORMAT_YVYU:
3547                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3548         case DRM_FORMAT_UYVY:
3549                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3550         case DRM_FORMAT_VYUY:
3551                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3552         case DRM_FORMAT_NV12:
3553                 return PLANE_CTL_FORMAT_NV12;
3554         default:
3555                 MISSING_CASE(pixel_format);
3556         }
3557
3558         return 0;
3559 }
3560
3561 /*
3562  * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3563  * to be already pre-multiplied. We need to add a knob (or a different
3564  * DRM_FORMAT) for user-space to configure that.
3565  */
3566 static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
3567 {
3568         switch (pixel_format) {
3569         case DRM_FORMAT_ABGR8888:
3570         case DRM_FORMAT_ARGB8888:
3571                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3572         default:
3573                 return PLANE_CTL_ALPHA_DISABLE;
3574         }
3575 }
3576
3577 static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
3578 {
3579         switch (pixel_format) {
3580         case DRM_FORMAT_ABGR8888:
3581         case DRM_FORMAT_ARGB8888:
3582                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3583         default:
3584                 return PLANE_COLOR_ALPHA_DISABLE;
3585         }
3586 }
3587
3588 static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3589 {
3590         switch (fb_modifier) {
3591         case DRM_FORMAT_MOD_LINEAR:
3592                 break;
3593         case I915_FORMAT_MOD_X_TILED:
3594                 return PLANE_CTL_TILED_X;
3595         case I915_FORMAT_MOD_Y_TILED:
3596                 return PLANE_CTL_TILED_Y;
3597         case I915_FORMAT_MOD_Y_TILED_CCS:
3598                 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3599         case I915_FORMAT_MOD_Yf_TILED:
3600                 return PLANE_CTL_TILED_YF;
3601         case I915_FORMAT_MOD_Yf_TILED_CCS:
3602                 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3603         default:
3604                 MISSING_CASE(fb_modifier);
3605         }
3606
3607         return 0;
3608 }
3609
3610 static u32 skl_plane_ctl_rotate(unsigned int rotate)
3611 {
3612         switch (rotate) {
3613         case DRM_MODE_ROTATE_0:
3614                 break;
3615         /*
3616          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
3617          * while i915 HW rotation is clockwise, thats why this swapping.
3618          */
3619         case DRM_MODE_ROTATE_90:
3620                 return PLANE_CTL_ROTATE_270;
3621         case DRM_MODE_ROTATE_180:
3622                 return PLANE_CTL_ROTATE_180;
3623         case DRM_MODE_ROTATE_270:
3624                 return PLANE_CTL_ROTATE_90;
3625         default:
3626                 MISSING_CASE(rotate);
3627         }
3628
3629         return 0;
3630 }
3631
3632 static u32 cnl_plane_ctl_flip(unsigned int reflect)
3633 {
3634         switch (reflect) {
3635         case 0:
3636                 break;
3637         case DRM_MODE_REFLECT_X:
3638                 return PLANE_CTL_FLIP_HORIZONTAL;
3639         case DRM_MODE_REFLECT_Y:
3640         default:
3641                 MISSING_CASE(reflect);
3642         }
3643
3644         return 0;
3645 }
3646
3647 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
3648                   const struct intel_plane_state *plane_state)
3649 {
3650         struct drm_i915_private *dev_priv =
3651                 to_i915(plane_state->base.plane->dev);
3652         const struct drm_framebuffer *fb = plane_state->base.fb;
3653         unsigned int rotation = plane_state->base.rotation;
3654         const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
3655         u32 plane_ctl;
3656
3657         plane_ctl = PLANE_CTL_ENABLE;
3658
3659         if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
3660                 plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
3661                 plane_ctl |=
3662                         PLANE_CTL_PIPE_GAMMA_ENABLE |
3663                         PLANE_CTL_PIPE_CSC_ENABLE |
3664                         PLANE_CTL_PLANE_GAMMA_DISABLE;
3665
3666                 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3667                         plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
3668
3669                 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3670                         plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
3671         }
3672
3673         plane_ctl |= skl_plane_ctl_format(fb->format->format);
3674         plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
3675         plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
3676
3677         if (INTEL_GEN(dev_priv) >= 10)
3678                 plane_ctl |= cnl_plane_ctl_flip(rotation &
3679                                                 DRM_MODE_REFLECT_MASK);
3680
3681         if (key->flags & I915_SET_COLORKEY_DESTINATION)
3682                 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
3683         else if (key->flags & I915_SET_COLORKEY_SOURCE)
3684                 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
3685
3686         return plane_ctl;
3687 }
3688
3689 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3690                         const struct intel_plane_state *plane_state)
3691 {
3692         struct drm_i915_private *dev_priv =
3693                 to_i915(plane_state->base.plane->dev);
3694         const struct drm_framebuffer *fb = plane_state->base.fb;
3695         u32 plane_color_ctl = 0;
3696
3697         if (INTEL_GEN(dev_priv) < 11) {
3698                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
3699                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3700         }
3701         plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
3702         plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
3703
3704         if (fb->format->is_yuv) {
3705                 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3706                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3707                 else
3708                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
3709
3710                 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3711                         plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
3712         }
3713
3714         return plane_color_ctl;
3715 }
3716
/*
 * Restore the display to a previously duplicated atomic state after a
 * reset. @state may be NULL, in which case only the HW state readout
 * and VGA redisable are performed. Returns 0 or a negative error code
 * from the commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* Callers hold all modeset locks, so -EDEADLK should be impossible. */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
3755
3756 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3757 {
3758         return intel_has_gpu_reset(dev_priv) &&
3759                 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
3760 }
3761
/*
 * Prepare the display for a GPU reset that clobbers it (or when the
 * force_reset_modeset_test modparam is set): take all modeset locks,
 * duplicate the current atomic state, and disable all CRTCs. The locks
 * and the stashed state are consumed by intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until we stop hitting -EDEADLK. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		/* Locks stay held on purpose; intel_finish_reset() drops them. */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		/* Locks stay held on purpose; intel_finish_reset() drops them. */
		return;
	}

	/* Stash the duplicated state for intel_finish_reset() to restore. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3817
/*
 * Counterpart to intel_prepare_reset(): restore the display state that
 * was stashed before the reset and drop the modeset locks taken there.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	/* Take ownership of the stashed state; may be NULL on early bailout. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupt setup before restoring the state. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3871
/*
 * Fastset path: apply the new pipe source size and panel-fitter state of an
 * already-enabled pipe without performing a full modeset.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC encodes (width - 1) in the high half, (height - 1) low. */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(crtc);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3907
/*
 * Switch the FDI link of @crtc from a training pattern back to the normal
 * pixel stream, enabling enhanced framing on both the TX (CPU) and RX (PCH)
 * ends.  IVB uses its own training-field layout and additionally wants FS/FE
 * error correction enabled.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses a different training-pattern field. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3948
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU-to-PCH FDI link: run training pattern 1 until the RX
 * reports bit lock, then pattern 2 until it reports symbol lock.  Each
 * phase is polled at most 5 times; failure is logged via DRM_ERROR but
 * the sequence continues regardless.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock, writing the set bit back (IIR-style ack). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same polling scheme as train 1, now waiting for symbol lock. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4042
/*
 * Candidate FDI voltage-swing / pre-emphasis settings (SNB-B field values),
 * stepped through in order by the gen6/IVB link-training loops below.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4049
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link on SNB/CPT: for training pattern 1, sweep the four
 * vswing/pre-emphasis settings from snb_b_fdi_train_param[] and poll (up to
 * 5 times per setting) until the RX reports bit lock; then repeat the sweep
 * for pattern 2 / symbol lock.  Failures are logged but not fatal.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses its own training-pattern field layout. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Sweep drive levels until the RX reports bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on bit lock. */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same sweep again, this time waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4182
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Hand-rolled FDI link training for IVB A0: each vswing/pre-emphasis level
 * from snb_b_fdi_train_param[] is tried twice (j/2 indexes the table).  On
 * every attempt the TX and RX are first fully disabled, then re-enabled with
 * training pattern 1; only once bit lock is seen do we advance to pattern 2
 * and wait for symbol lock, jumping to train_done on success.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll (twice per iteration) for bit lock. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			/* No bit lock at this level: retry with the next one. */
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4302
/*
 * Enable the FDI PLLs for @intel_crtc: bring up the PCH-side RX PLL (with
 * lane count and BPC mirrored from PIPECONF), switch the RX from Rawclk to
 * PCDclk, then enable the CPU-side TX PLL if it is not already on.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* (0x7 << 16) is the RX BPC field, refilled below from PIPECONF. */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4339
/*
 * Disable the FDI PLLs for @intel_crtc, undoing ironlake_fdi_pll_enable():
 * switch the RX back to Rawclk, turn off the TX PLL, then the RX PLL, with
 * a settle delay after each.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4369
/*
 * Disable the FDI link of @crtc: turn off CPU TX and PCH RX, apply the IBX
 * clock-pointer workaround, and leave both ends parked in training
 * pattern 1 with the RX BPC field kept in sync with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* (0x7 << 16) is the RX BPC field, refilled from PIPECONF. */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4422
4423 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4424 {
4425         struct drm_crtc *crtc;
4426         bool cleanup_done;
4427
4428         drm_for_each_crtc(crtc, &dev_priv->drm) {
4429                 struct drm_crtc_commit *commit;
4430                 spin_lock(&crtc->commit_lock);
4431                 commit = list_first_entry_or_null(&crtc->commit_list,
4432                                                   struct drm_crtc_commit, commit_entry);
4433                 cleanup_done = commit ?
4434                         try_wait_for_completion(&commit->cleanup_done) : true;
4435                 spin_unlock(&crtc->commit_lock);
4436
4437                 if (cleanup_done)
4438                         continue;
4439
4440                 drm_crtc_wait_one_vblank(crtc);
4441
4442                 return true;
4443         }
4444
4445         return false;
4446 }
4447
/*
 * Gate the iCLKIP pixel clock and set the SSC modulator's disable bit via
 * the sideband interface.  lpt_program_iclkip() relies on this being done
 * before reprogramming the divisors.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4462
/* Program iCLKIP clock to the desired frequency */
/*
 * Program the LPT iCLKIP modulator so it produces crtc_clock kHz: pick the
 * smallest aux divider (1 << auxdiv) that keeps the 7-bit integer divisor in
 * range, split the remainder into integer (divsel) and phase-increment
 * (phaseinc, 1/64 steps) parts, write them over sideband, then ungate the
 * pixel clock.  Inverse of lpt_get_iclkip().
 */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock in in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* desired_divisor = (divsel + 2) * 64 + phaseinc, see below. */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4540
/*
 * Read back the currently programmed iCLKIP frequency in kHz by inverting
 * the divisor maths of lpt_program_iclkip().  Returns 0 when the pixel
 * clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reassemble the divisor exactly as the program path split it. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4577
/*
 * Copy the CPU transcoder timing registers of @crtc into the given PCH
 * transcoder, so both ends of the FDI link agree on the mode timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4601
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.  A no-op
 * when the bit already matches @enable.  Both FDI B and C receivers must be
 * disabled while the bit is flipped (WARNed on otherwise).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4622
4623 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4624 {
4625         struct drm_device *dev = intel_crtc->base.dev;
4626
4627         switch (intel_crtc->pipe) {
4628         case PIPE_A:
4629                 break;
4630         case PIPE_B:
4631                 if (intel_crtc->config->fdi_lanes > 2)
4632                         cpt_set_fdi_bc_bifurcation(dev, false);
4633                 else
4634                         cpt_set_fdi_bc_bifurcation(dev, true);
4635
4636                 break;
4637         case PIPE_C:
4638                 cpt_set_fdi_bc_bifurcation(dev, true);
4639
4640                 break;
4641         default:
4642                 BUG();
4643         }
4644 }
4645
4646 /*
4647  * Finds the encoder associated with the given CRTC. This can only be
4648  * used when we know that the CRTC isn't feeding multiple encoders!
4649  */
4650 static struct intel_encoder *
4651 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4652                            const struct intel_crtc_state *crtc_state)
4653 {
4654         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4655         const struct drm_connector_state *connector_state;
4656         const struct drm_connector *connector;
4657         struct intel_encoder *encoder = NULL;
4658         int num_encoders = 0;
4659         int i;
4660
4661         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4662                 if (connector_state->crtc != &crtc->base)
4663                         continue;
4664
4665                 encoder = to_intel_encoder(connector_state->best_encoder);
4666                 num_encoders++;
4667         }
4668
4669         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4670              num_encoders, pipe_name(crtc->pipe));
4671
4672         return encoder;
4673 }
4674
4675 /*
4676  * Enable PCH resources required for PCH ports:
4677  *   - PCH PLLs
4678  *   - FDI training & RX/TX
4679  *   - update transcoder timings
4680  *   - DP transcoding bits
4681  *   - transcoder
4682  */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
                                const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        u32 temp;

        assert_pch_transcoder_disabled(dev_priv, pipe);

        /* IVB shares FDI B/C lanes; settle the bifurcation before training. */
        if (IS_IVYBRIDGE(dev_priv))
                ivybridge_update_fdi_bc_bifurcation(crtc);

        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
        I915_WRITE(FDI_RX_TUSIZE1(pipe),
                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc, crtc_state);

        /* We need to program the right clock selection before writing the pixel
         * mutliplier into the DPLL. */
        if (HAS_PCH_CPT(dev_priv)) {
                u32 sel;

                /* Route DPLL A or B to this transcoder based on the
                 * shared dpll chosen during the atomic check. */
                temp = I915_READ(PCH_DPLL_SEL);
                temp |= TRANS_DPLL_ENABLE(pipe);
                sel = TRANS_DPLLB_SEL(pipe);
                if (crtc_state->shared_dpll ==
                    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
                        temp |= sel;
                else
                        temp &= ~sel;
                I915_WRITE(PCH_DPLL_SEL, temp);
        }

        /* XXX: pch pll's can be enabled any time before we enable the PCH
         * transcoder, and we actually should do this to not upset any PCH
         * transcoder that already use the clock when we share it.
         *
         * Note that enable_shared_dpll tries to do the right thing, but
         * get_shared_dpll unconditionally resets the pll - we need that to have
         * the right LVDS enable sequence. */
        intel_enable_shared_dpll(crtc);

        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
        ironlake_pch_transcoder_set_timings(crtc, pipe);

        /* Training is done; switch FDI back to normal pixel transfer. */
        intel_fdi_normal_train(crtc);

        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev_priv) &&
            intel_crtc_has_dp_encoder(crtc_state)) {
                const struct drm_display_mode *adjusted_mode =
                        &crtc_state->base.adjusted_mode;
                /* derive the link bpc from the current PIPECONF setting */
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                i915_reg_t reg = TRANS_DP_CTL(pipe);
                enum port port;

                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
                          TRANS_DP_SYNC_MASK |
                          TRANS_DP_BPC_MASK);
                temp |= TRANS_DP_OUTPUT_ENABLE;
                temp |= bpc << 9; /* same format but at 11:9 */

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

                /* Exactly one encoder feeds this crtc here, so the
                 * single-encoder lookup is safe. */
                port = intel_get_crtc_new_encoder(state, crtc_state)->port;
                WARN_ON(port < PORT_B || port > PORT_D);
                temp |= TRANS_DP_PORT_SEL(port);

                I915_WRITE(reg, temp);
        }

        /* everything is configured; finally turn the PCH transcoder on */
        ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4766
4767 static void lpt_pch_enable(const struct intel_atomic_state *state,
4768                            const struct intel_crtc_state *crtc_state)
4769 {
4770         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4771         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4772         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4773
4774         assert_pch_transcoder_disabled(dev_priv, PIPE_A);
4775
4776         lpt_program_iclkip(crtc);
4777
4778         /* Set transcoder timing. */
4779         ironlake_pch_transcoder_set_timings(crtc, PIPE_A);
4780
4781         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4782 }
4783
4784 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4785 {
4786         struct drm_i915_private *dev_priv = to_i915(dev);
4787         i915_reg_t dslreg = PIPEDSL(pipe);
4788         u32 temp;
4789
4790         temp = I915_READ(dslreg);
4791         udelay(500);
4792         if (wait_for(I915_READ(dslreg) != temp, 5)) {
4793                 if (wait_for(I915_READ(dslreg) != temp, 5))
4794                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4795         }
4796 }
4797
4798 /*
4799  * The hardware phase 0.0 refers to the center of the pixel.
4800  * We want to start from the top/left edge which is phase
4801  * -0.5. That matches how the hardware calculates the scaling
4802  * factors (from top-left of the first pixel to bottom-right
4803  * of the last pixel, as opposed to the pixel centers).
4804  *
4805  * For 4:2:0 subsampled chroma planes we obviously have to
4806  * adjust that so that the chroma sample position lands in
4807  * the right spot.
4808  *
4809  * Note that for packed YCbCr 4:2:2 formats there is no way to
4810  * control chroma siting. The hardware simply replicates the
4811  * chroma samples for both of the luma samples, and thus we don't
4812  * actually get the expected MPEG2 chroma siting convention :(
4813  * The same behaviour is observed on pre-SKL platforms as well.
4814  */
4815 u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
4816 {
4817         int phase = -0x8000;
4818         u16 trip = 0;
4819
4820         if (chroma_cosited)
4821                 phase += (sub - 1) * 0x8000 / sub;
4822
4823         if (phase < 0)
4824                 phase = 0x10000 + phase;
4825         else
4826                 trip = PS_PHASE_TRIP;
4827
4828         return ((phase >> 2) & PS_PHASE_MASK) | trip;
4829 }
4830
/*
 * Stage allocation or release of a pipe/plane scaler in @crtc_state.
 *
 * Decides whether @scaler_user needs a scaler for the src->dst
 * conversion, validates the dimensions against the per-platform
 * limits, and updates scaler_state->scaler_users accordingly.  The
 * actual register programming (including freeing a scaler) happens
 * later during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL if the request cannot be supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  bool plane_scaler_check,
                  uint32_t pixel_format)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        int need_scaling;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        need_scaling = src_w != dst_w || src_h != dst_h;

        /* NV12 planes always need a scaler, even at 1:1 */
        if (plane_scaler_check)
                if (pixel_format == DRM_FORMAT_NV12)
                        need_scaling = true;

        /* YCbCr 4:2:0 output needs the pipe scaler too */
        if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
                need_scaling = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
            need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler binded to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaling) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* NV12 has its own, stricter minimum source size */
        if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("NV12: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (IS_GEN11(dev_priv) &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (!IS_GEN11(dev_priv) &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                scaler_state->scaler_users);

        return 0;
}
4927
4928 /**
4929  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4930  *
4931  * @state: crtc's scaler state
4932  *
4933  * Return
4934  *     0 - scaler_usage updated successfully
4935  *    error - requested scaling cannot be supported or other error condition
4936  */
4937 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4938 {
4939         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4940
4941         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4942                                  &state->scaler_state.scaler_id,
4943                                  state->pipe_src_w, state->pipe_src_h,
4944                                  adjusted_mode->crtc_hdisplay,
4945                                  adjusted_mode->crtc_vdisplay, false, 0);
4946 }
4947
4948 /**
4949  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4950  * @crtc_state: crtc's scaler state
4951  * @plane_state: atomic plane state to update
4952  *
4953  * Return
4954  *     0 - scaler_usage updated successfully
4955  *    error - requested scaling cannot be supported or other error condition
4956  */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
                                   struct intel_plane_state *plane_state)
{

        struct intel_plane *intel_plane =
                to_intel_plane(plane_state->base.plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        int ret;

        /* No fb or an invisible plane means any scaler should be released. */
        bool force_detach = !fb || !plane_state->base.visible;

        /* src coordinates are 16.16 fixed point, hence the >> 16 */
        ret = skl_update_scaler(crtc_state, force_detach,
                                drm_plane_index(&intel_plane->base),
                                &plane_state->scaler_id,
                                drm_rect_width(&plane_state->base.src) >> 16,
                                drm_rect_height(&plane_state->base.src) >> 16,
                                drm_rect_width(&plane_state->base.dst),
                                drm_rect_height(&plane_state->base.dst),
                                fb ? true : false, fb ? fb->format->format : 0);

        /* scaler_id < 0 means no scaler was staged; nothing more to check */
        if (ret || plane_state->scaler_id < 0)
                return ret;

        /* check colorkey */
        if (plane_state->ckey.flags) {
                DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
                              intel_plane->base.base.id,
                              intel_plane->base.name);
                return -EINVAL;
        }

        /* Check src format.  fb is non-NULL here: a NULL fb forces detach
         * above, which leaves scaler_id < 0 and returns early. */
        switch (fb->format->format) {
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_VYUY:
        case DRM_FORMAT_NV12:
                break;
        default:
                DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
                              intel_plane->base.base.id, intel_plane->base.name,
                              fb->base.id, fb->format->format);
                return -EINVAL;
        }

        return 0;
}
5012
5013 static void skylake_scaler_disable(struct intel_crtc *crtc)
5014 {
5015         int i;
5016
5017         for (i = 0; i < crtc->num_scalers; i++)
5018                 skl_detach_scaler(crtc, i);
5019 }
5020
/*
 * Enable the panel fitter on SKL+ by programming the pipe scaler that
 * was reserved for it in the crtc's scaler_state.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc->config->scaler_state;

        if (crtc->config->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int id;

                /* a scaler must have been staged during the atomic check */
                if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;

                /* non-subsampled (sub == 1), non-cosited phase values */
                uv_rgb_hphase = skl_scaler_calc_phase(1, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, false);

                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                /* NOTE(review): phase registers use I915_WRITE_FW while the
                 * rest use I915_WRITE - confirm this mix is intentional. */
                I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
        }
}
5050
5051 static void ironlake_pfit_enable(struct intel_crtc *crtc)
5052 {
5053         struct drm_device *dev = crtc->base.dev;
5054         struct drm_i915_private *dev_priv = to_i915(dev);
5055         int pipe = crtc->pipe;
5056
5057         if (crtc->config->pch_pfit.enabled) {
5058                 /* Force use of hard-coded filter coefficients
5059                  * as some pre-programmed values are broken,
5060                  * e.g. x201.
5061                  */
5062                 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5063                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5064                                                  PF_PIPE_SEL_IVB(pipe));
5065                 else
5066                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5067                 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
5068                 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
5069         }
5070 }
5071
/*
 * Enable IPS (Intermediate Pixel Storage) for the given crtc state.
 * Must run after at least one non-cursor plane is enabled and a vblank
 * has passed; called from the post_plane_update path.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                /* BDW enables IPS via the pcode mailbox instead of MMIO */
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                mutex_unlock(&dev_priv->pcu_lock);
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, IPS_ENABLE,
                                            50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
5111
/*
 * Disable IPS for the given crtc state and wait until it has actually
 * shut down, so the planes can subsequently be disabled safely.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                /* BDW controls IPS through the pcode mailbox */
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
                                            100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5142
5143 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5144 {
5145         if (intel_crtc->overlay) {
5146                 struct drm_device *dev = intel_crtc->base.dev;
5147
5148                 mutex_lock(&dev->struct_mutex);
5149                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5150                 mutex_unlock(&dev->struct_mutex);
5151         }
5152
5153         /* Let userspace switch the overlay on again. In most cases userspace
5154          * has to recompute where to put it anyway.
5155          */
5156 }
5157
5158 /**
5159  * intel_post_enable_primary - Perform operations after enabling primary plane
5160  * @crtc: the CRTC whose primary plane was just enabled
5161  * @new_crtc_state: the enabling state
5162  *
5163  * Performs potentially sleeping operations that must be done after the primary
5164  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5165  * called due to an explicit primary plane update, or due to an implicit
5166  * re-enable that is caused when a sprite plane is updated to no longer
5167  * completely hide the primary plane.
5168  */
5169 static void
5170 intel_post_enable_primary(struct drm_crtc *crtc,
5171                           const struct intel_crtc_state *new_crtc_state)
5172 {
5173         struct drm_device *dev = crtc->dev;
5174         struct drm_i915_private *dev_priv = to_i915(dev);
5175         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5176         int pipe = intel_crtc->pipe;
5177
5178         /*
5179          * Gen2 reports pipe underruns whenever all planes are disabled.
5180          * So don't enable underrun reporting before at least some planes
5181          * are enabled.
5182          * FIXME: Need to fix the logic to work when we turn off all planes
5183          * but leave the pipe running.
5184          */
5185         if (IS_GEN2(dev_priv))
5186                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5187
5188         /* Underruns don't always raise interrupts, so check manually. */
5189         intel_check_cpu_fifo_underruns(dev_priv);
5190         intel_check_pch_fifo_underruns(dev_priv);
5191 }
5192
5193 /* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        /* IPS must be torn down before the primary plane goes away. */
        hsw_disable_ips(to_intel_crtc_state(crtc->state));

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
}
5224
5225 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5226                                        const struct intel_crtc_state *new_crtc_state)
5227 {
5228         if (!old_crtc_state->ips_enabled)
5229                 return false;
5230
5231         if (needs_modeset(&new_crtc_state->base))
5232                 return true;
5233
5234         return !new_crtc_state->ips_enabled;
5235 }
5236
5237 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5238                                        const struct intel_crtc_state *new_crtc_state)
5239 {
5240         if (!new_crtc_state->ips_enabled)
5241                 return false;
5242
5243         if (needs_modeset(&new_crtc_state->base))
5244                 return true;
5245
5246         /*
5247          * We can't read out IPS on broadwell, assume the worst and
5248          * forcibly enable IPS on the first fastset.
5249          */
5250         if (new_crtc_state->update_pipe &&
5251             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5252                 return true;
5253
5254         return !old_crtc_state->ips_enabled;
5255 }
5256
5257 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5258                           const struct intel_crtc_state *crtc_state)
5259 {
5260         if (!crtc_state->nv12_planes)
5261                 return false;
5262
5263         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
5264                 return false;
5265
5266         if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
5267             IS_CANNONLAKE(dev_priv))
5268                 return true;
5269
5270         return false;
5271 }
5272
/*
 * Finish a plane update after the vblank: flush frontbuffer tracking,
 * program the optimal watermarks, re-enable IPS/FBC and undo the NV12
 * workaround once it is no longer needed.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        struct intel_crtc_state *pipe_config =
                intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
                                                crtc);
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(old_state, primary);

        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

        if (pipe_config->update_wm_post && pipe_config->base.active)
                intel_update_watermarks(crtc);

        if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
                hsw_enable_ips(pipe_config);

        if (old_primary_state) {
                struct drm_plane_state *new_primary_state =
                        drm_atomic_get_new_plane_state(old_state, primary);

                intel_fbc_post_update(crtc);

                /* Run the post-enable steps whenever the primary plane
                 * becomes visible, explicitly or via a modeset. */
                if (new_primary_state->visible &&
                    (needs_modeset(&pipe_config->base) ||
                     !old_primary_state->visible))
                        intel_post_enable_primary(&crtc->base, pipe_config);
        }

        /* Display WA 827: turn the workaround off once no plane needs it */
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, false);
                skl_wa_528(dev_priv, crtc->pipe, false);
        }
}
5313
/*
 * Prepare the pipe for a plane update: tear down IPS when required,
 * engage the NV12 workaround, disable self-refresh and LP watermarks
 * where the hardware needs a vblank of settling time, and program the
 * intermediate watermarks for non-modeset updates.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
                                   struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(old_state, primary);
        bool modeset = needs_modeset(&pipe_config->base);
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);

        /* IPS must be off before the plane configuration changes. */
        if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
                hsw_disable_ips(old_crtc_state);

        if (old_primary_state) {
                struct intel_plane_state *new_primary_state =
                        intel_atomic_get_new_plane_state(old_intel_state,
                                                         to_intel_plane(primary));

                intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So disable underrun reporting before all the planes get disabled.
                 */
                if (IS_GEN2(dev_priv) && old_primary_state->visible &&
                    (modeset || !new_primary_state->base.visible))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
        }

        /* Display WA 827: enable the workaround before NV12 planes go live */
        if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
            needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, true);
                skl_wa_528(dev_priv, crtc->pipe, true);
        }

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
            pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * If we're doing a modeset, we're done.  No need to do any pre-vblank
         * watermark programming here.
         */
        if (needs_modeset(&pipe_config->base))
                return;

        /*
         * For platforms that support atomic watermarks, program the
         * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
         * will be the intermediate values that are safe for both pre- and
         * post- vblank; when vblank happens, the 'active' values will be set
         * to the final 'target' values and we'll do this again to get the
         * optimal watermarks.  For gen9+ platforms, the values we program here
         * will be the final target values which will get automatically latched
         * at vblank time; no further programming will be necessary.
         *
         * If a platform hasn't been transitioned to atomic watermarks yet,
         * we'll continue to update watermarks the old way, if flags tell
         * us to.
         */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state,
                                                     pipe_config);
        else if (pipe_config->update_wm_pre)
                intel_update_watermarks(crtc);
}
5403
5404 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
5405 {
5406         struct drm_device *dev = crtc->dev;
5407         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5408         struct drm_plane *p;
5409         int pipe = intel_crtc->pipe;
5410
5411         intel_crtc_dpms_overlay_disable(intel_crtc);
5412
5413         drm_for_each_plane_mask(p, dev, plane_mask)
5414                 to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
5415
5416         /*
5417          * FIXME: Once we grow proper nuclear flip support out of this we need
5418          * to compute the mask of flip planes precisely. For the time being
5419          * consider this a flip to a NULL plane.
5420          */
5421         intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
5422 }
5423
5424 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5425                                           struct intel_crtc_state *crtc_state,
5426                                           struct drm_atomic_state *old_state)
5427 {
5428         struct drm_connector_state *conn_state;
5429         struct drm_connector *conn;
5430         int i;
5431
5432         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5433                 struct intel_encoder *encoder =
5434                         to_intel_encoder(conn_state->best_encoder);
5435
5436                 if (conn_state->crtc != crtc)
5437                         continue;
5438
5439                 if (encoder->pre_pll_enable)
5440                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5441         }
5442 }
5443
5444 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5445                                       struct intel_crtc_state *crtc_state,
5446                                       struct drm_atomic_state *old_state)
5447 {
5448         struct drm_connector_state *conn_state;
5449         struct drm_connector *conn;
5450         int i;
5451
5452         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5453                 struct intel_encoder *encoder =
5454                         to_intel_encoder(conn_state->best_encoder);
5455
5456                 if (conn_state->crtc != crtc)
5457                         continue;
5458
5459                 if (encoder->pre_enable)
5460                         encoder->pre_enable(encoder, crtc_state, conn_state);
5461         }
5462 }
5463
5464 static void intel_encoders_enable(struct drm_crtc *crtc,
5465                                   struct intel_crtc_state *crtc_state,
5466                                   struct drm_atomic_state *old_state)
5467 {
5468         struct drm_connector_state *conn_state;
5469         struct drm_connector *conn;
5470         int i;
5471
5472         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5473                 struct intel_encoder *encoder =
5474                         to_intel_encoder(conn_state->best_encoder);
5475
5476                 if (conn_state->crtc != crtc)
5477                         continue;
5478
5479                 encoder->enable(encoder, crtc_state, conn_state);
5480                 intel_opregion_notify_encoder(encoder, true);
5481         }
5482 }
5483
5484 static void intel_encoders_disable(struct drm_crtc *crtc,
5485                                    struct intel_crtc_state *old_crtc_state,
5486                                    struct drm_atomic_state *old_state)
5487 {
5488         struct drm_connector_state *old_conn_state;
5489         struct drm_connector *conn;
5490         int i;
5491
5492         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5493                 struct intel_encoder *encoder =
5494                         to_intel_encoder(old_conn_state->best_encoder);
5495
5496                 if (old_conn_state->crtc != crtc)
5497                         continue;
5498
5499                 intel_opregion_notify_encoder(encoder, false);
5500                 encoder->disable(encoder, old_crtc_state, old_conn_state);
5501         }
5502 }
5503
5504 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5505                                         struct intel_crtc_state *old_crtc_state,
5506                                         struct drm_atomic_state *old_state)
5507 {
5508         struct drm_connector_state *old_conn_state;
5509         struct drm_connector *conn;
5510         int i;
5511
5512         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5513                 struct intel_encoder *encoder =
5514                         to_intel_encoder(old_conn_state->best_encoder);
5515
5516                 if (old_conn_state->crtc != crtc)
5517                         continue;
5518
5519                 if (encoder->post_disable)
5520                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5521         }
5522 }
5523
5524 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5525                                             struct intel_crtc_state *old_crtc_state,
5526                                             struct drm_atomic_state *old_state)
5527 {
5528         struct drm_connector_state *old_conn_state;
5529         struct drm_connector *conn;
5530         int i;
5531
5532         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5533                 struct intel_encoder *encoder =
5534                         to_intel_encoder(old_conn_state->best_encoder);
5535
5536                 if (old_conn_state->crtc != crtc)
5537                         continue;
5538
5539                 if (encoder->post_pll_disable)
5540                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5541         }
5542 }
5543
/*
 * Full CRTC enable sequence for ILK-style (PCH based) platforms.
 *
 * The ordering below follows the hardware enabling sequence: program
 * timings and M/N values, pre-enable the encoders, bring up FDI/PCH
 * where a PCH encoder is present, then enable the pipe and finally the
 * encoders. Reordering steps here risks FIFO underruns or modeset
 * failures — see the inline comments.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* Enabling an already-active CRTC is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	/* FDI link M/N values only matter when a PCH encoder is used. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	/* Mark active before touching encoders/PLLs. */
	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	/* Program safe initial watermarks before the pipe starts. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(old_intel_state, pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-arm the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5637
5638 /* IPS only exists on ULT machines and is tied to pipe A. */
5639 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5640 {
5641         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5642 }
5643
5644 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5645                                             enum pipe pipe, bool apply)
5646 {
5647         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5648         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5649
5650         if (apply)
5651                 val |= mask;
5652         else
5653                 val &= ~mask;
5654
5655         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5656 }
5657
5658 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5659 {
5660         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5661         enum pipe pipe = crtc->pipe;
5662         uint32_t val;
5663
5664         val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
5665
5666         /* Program B credit equally to all pipes */
5667         val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
5668
5669         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5670 }
5671
/*
 * Full CRTC enable sequence for HSW+ (DDI based) platforms, including
 * SKL/GLK/CNL/ICL variants. The step ordering mirrors the hardware
 * enabling sequence — PLLs, encoder pre-enable, transcoder/pipe setup,
 * pipe enable, encoders — and must not be reordered casually; several
 * display workarounds below depend on it.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;
	u32 pipe_chicken;

	/* Enabling an already-active CRTC is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	/* ICL+ needs the PLL-to-port mapping set up before pre_enable. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_map_plls_to_ports(crtc, pipe_config, old_state);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	/* DSI transcoders program their own timings elsewhere. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	/* FDI link M/N values only matter when a PCH encoder is used. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 intel_crtc->config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	/*
	 * Display WA #1153: enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
		if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
			I915_WRITE_FW(PIPE_CHICKEN(pipe),
				      pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
	}

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	/* Program safe initial watermarks before the pipe starts. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(old_intel_state, pipe_config);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	/* WA #1180 tail: wait a frame, then restore clock gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
5792
5793 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5794 {
5795         struct drm_device *dev = crtc->base.dev;
5796         struct drm_i915_private *dev_priv = to_i915(dev);
5797         int pipe = crtc->pipe;
5798
5799         /* To avoid upsetting the power well on haswell only disable the pfit if
5800          * it's in use. The hw state code will make sure we get this right. */
5801         if (force || crtc->config->pch_pfit.enabled) {
5802                 I915_WRITE(PF_CTL(pipe), 0);
5803                 I915_WRITE(PF_WIN_POS(pipe), 0);
5804                 I915_WRITE(PF_WIN_SZ(pipe), 0);
5805         }
5806 }
5807
/*
 * Full CRTC disable sequence for ILK-style (PCH based) platforms:
 * encoders first, then the pipe, panel fitter, FDI and finally the
 * PCH transcoder/PLL. The teardown order is the reverse of the
 * hardware enabling sequence and must not be reordered.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* PCH side teardown: transcoder, DP control, DPLL select, FDI PLL. */
	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5866
/*
 * Full CRTC disable sequence for HSW+ (DDI based) platforms: encoders
 * first, then pipe, transcoder function, scaler/fitter, and finally
 * the encoder post-disable hooks and (ICL+) PLL/port unmapping.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	/* Release the MST virtual-channel bandwidth before teardown. */
	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	/* Gen9+ uses the pipe scalers, older DDI platforms the PCH pfit. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
}
5900
5901 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5902 {
5903         struct drm_device *dev = crtc->base.dev;
5904         struct drm_i915_private *dev_priv = to_i915(dev);
5905         struct intel_crtc_state *pipe_config = crtc->config;
5906
5907         if (!pipe_config->gmch_pfit.control)
5908                 return;
5909
5910         /*
5911          * The panel fitter should only be adjusted whilst the pipe is disabled,
5912          * according to register description and PRM.
5913          */
5914         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5915         assert_pipe_disabled(dev_priv, crtc->pipe);
5916
5917         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5918         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5919
5920         /* Border color in case we don't scale up to the full screen. Black by
5921          * default, change to something else for debugging. */
5922         I915_WRITE(BCLRPAT(crtc->pipe), 0);
5923 }
5924
5925 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
5926 {
5927         if (IS_ICELAKE(dev_priv))
5928                 return port >= PORT_C && port <= PORT_F;
5929
5930         return false;
5931 }
5932
5933 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
5934 {
5935         if (!intel_port_is_tc(dev_priv, port))
5936                 return PORT_TC_NONE;
5937
5938         return port - PORT_C;
5939 }
5940
5941 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
5942 {
5943         switch (port) {
5944         case PORT_A:
5945                 return POWER_DOMAIN_PORT_DDI_A_LANES;
5946         case PORT_B:
5947                 return POWER_DOMAIN_PORT_DDI_B_LANES;
5948         case PORT_C:
5949                 return POWER_DOMAIN_PORT_DDI_C_LANES;
5950         case PORT_D:
5951                 return POWER_DOMAIN_PORT_DDI_D_LANES;
5952         case PORT_E:
5953                 return POWER_DOMAIN_PORT_DDI_E_LANES;
5954         case PORT_F:
5955                 return POWER_DOMAIN_PORT_DDI_F_LANES;
5956         default:
5957                 MISSING_CASE(port);
5958                 return POWER_DOMAIN_PORT_OTHER;
5959         }
5960 }
5961
5962 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
5963                                   struct intel_crtc_state *crtc_state)
5964 {
5965         struct drm_device *dev = crtc->dev;
5966         struct drm_i915_private *dev_priv = to_i915(dev);
5967         struct drm_encoder *encoder;
5968         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5969         enum pipe pipe = intel_crtc->pipe;
5970         u64 mask;
5971         enum transcoder transcoder = crtc_state->cpu_transcoder;
5972
5973         if (!crtc_state->base.active)
5974                 return 0;
5975
5976         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
5977         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
5978         if (crtc_state->pch_pfit.enabled ||
5979             crtc_state->pch_pfit.force_thru)
5980                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5981
5982         drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5983                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5984
5985                 mask |= BIT_ULL(intel_encoder->power_domain);
5986         }
5987
5988         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
5989                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
5990
5991         if (crtc_state->shared_dpll)
5992                 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
5993
5994         return mask;
5995 }
5996
5997 static u64
5998 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5999                                struct intel_crtc_state *crtc_state)
6000 {
6001         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6002         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6003         enum intel_display_power_domain domain;
6004         u64 domains, new_domains, old_domains;
6005
6006         old_domains = intel_crtc->enabled_power_domains;
6007         intel_crtc->enabled_power_domains = new_domains =
6008                 get_crtc_power_domains(crtc, crtc_state);
6009
6010         domains = new_domains & ~old_domains;
6011
6012         for_each_power_domain(domain, domains)
6013                 intel_display_power_get(dev_priv, domain);
6014
6015         return old_domains & ~new_domains;
6016 }
6017
6018 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6019                                       u64 domains)
6020 {
6021         enum intel_display_power_domain domain;
6022
6023         for_each_power_domain(domain, domains)
6024                 intel_display_power_put(dev_priv, domain);
6025 }
6026
6027 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6028                                    struct drm_atomic_state *old_state)
6029 {
6030         struct intel_atomic_state *old_intel_state =
6031                 to_intel_atomic_state(old_state);
6032         struct drm_crtc *crtc = pipe_config->base.crtc;
6033         struct drm_device *dev = crtc->dev;
6034         struct drm_i915_private *dev_priv = to_i915(dev);
6035         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6036         int pipe = intel_crtc->pipe;
6037
6038         if (WARN_ON(intel_crtc->active))
6039                 return;
6040
6041         if (intel_crtc_has_dp_encoder(intel_crtc->config))
6042                 intel_dp_set_m_n(intel_crtc, M1_N1);
6043
6044         intel_set_pipe_timings(intel_crtc);
6045         intel_set_pipe_src_size(intel_crtc);
6046
6047         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6048                 struct drm_i915_private *dev_priv = to_i915(dev);
6049
6050                 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6051                 I915_WRITE(CHV_CANVAS(pipe), 0);
6052         }
6053
6054         i9xx_set_pipeconf(intel_crtc);
6055
6056         intel_color_set_csc(&pipe_config->base);
6057
6058         intel_crtc->active = true;
6059
6060         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6061
6062         intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6063
6064         if (IS_CHERRYVIEW(dev_priv)) {
6065                 chv_prepare_pll(intel_crtc, intel_crtc->config);
6066                 chv_enable_pll(intel_crtc, intel_crtc->config);
6067         } else {
6068                 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6069                 vlv_enable_pll(intel_crtc, intel_crtc->config);
6070         }
6071
6072         intel_encoders_pre_enable(crtc, pipe_config, old_state);
6073
6074         i9xx_pfit_enable(intel_crtc);
6075
6076         intel_color_load_luts(&pipe_config->base);
6077
6078         dev_priv->display.initial_watermarks(old_intel_state,
6079                                              pipe_config);
6080         intel_enable_pipe(pipe_config);
6081
6082         assert_vblank_disabled(crtc);
6083         drm_crtc_vblank_on(crtc);
6084
6085         intel_encoders_enable(crtc, pipe_config, old_state);
6086 }
6087
6088 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6089 {
6090         struct drm_device *dev = crtc->base.dev;
6091         struct drm_i915_private *dev_priv = to_i915(dev);
6092
6093         I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6094         I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6095 }
6096
/*
 * Full CRTC enable sequence for gen2-4 (non-VLV/CHV GMCH) platforms:
 * PLL dividers, timings, pipeconf, encoder pre-enable, PLL, fitter,
 * LUTs, watermarks, pipe, and finally the encoders. The ordering
 * follows the hardware enabling sequence.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active CRTC is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* Gen2 underrun reporting is handled differently (all-planes-off). */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	/* Program safe initial watermarks before the pipe starts. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     intel_crtc->config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6146
/*
 * Disable the GMCH panel fitter for @crtc.
 *
 * A no-op if the current state never enabled the fitter. The pipe must
 * already be disabled; the single global PFIT_CONTROL register is only
 * safe to clear while the attached pipe is off.
 */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6161
/*
 * Disable a pipe on i9xx-class (GMCH) hardware, mirroring
 * i9xx_crtc_enable() in reverse order: encoders, vblanks, pipe,
 * panel fitter, then the PLL.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* DSI drives the pipe from its own PLL; leave the DPLL alone then. */
	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/*
	 * clock the pipe down to 640x480@60 to potentially save power;
	 * on 830 the pipe is deliberately re-enabled here since it must
	 * keep running.
	 */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
6210
/*
 * Force-disable a crtc outside of a normal atomic commit (used during
 * hardware state sanitization/takeover). Planes are shut off first, a
 * minimal atomic state is built so the platform crtc_disable hook can
 * run, and then all software bookkeeping (crtc/encoder state, power
 * domains, cdclk/voltage minimums) is cleared to match.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off every plane still visible on this crtc first. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Sync software state with what we just did to the hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Drop every power domain reference this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6280
6281 /*
6282  * turn all crtc's off, but do not adjust state
6283  * This has to be paired with a call to intel_modeset_setup_hw_state.
6284  */
6285 int intel_display_suspend(struct drm_device *dev)
6286 {
6287         struct drm_i915_private *dev_priv = to_i915(dev);
6288         struct drm_atomic_state *state;
6289         int ret;
6290
6291         state = drm_atomic_helper_suspend(dev);
6292         ret = PTR_ERR_OR_ZERO(state);
6293         if (ret)
6294                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6295         else
6296                 dev_priv->modeset_restore_state = state;
6297         return ret;
6298 }
6299
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Unregister the base encoder, then free the containing intel_encoder. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6307
6308 /* Cross check the actual hw state with our own modeset state tracking (and it's
6309  * internal consistency). */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		/* Hardware says enabled: software state must have a crtc. */
		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST encoders are virtual; skip the 1:1 encoder checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hardware says disabled: nothing should still point at it. */
		I915_STATE_WARN(crtc_state && crtc_state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6346
6347 int intel_connector_init(struct intel_connector *connector)
6348 {
6349         struct intel_digital_connector_state *conn_state;
6350
6351         /*
6352          * Allocate enough memory to hold intel_digital_connector_state,
6353          * This might be a few bytes too many, but for connectors that don't
6354          * need it we'll free the state and allocate a smaller one on the first
6355          * succesful commit anyway.
6356          */
6357         conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
6358         if (!conn_state)
6359                 return -ENOMEM;
6360
6361         __drm_atomic_helper_connector_reset(&connector->base,
6362                                             &conn_state->base);
6363
6364         return 0;
6365 }
6366
6367 struct intel_connector *intel_connector_alloc(void)
6368 {
6369         struct intel_connector *connector;
6370
6371         connector = kzalloc(sizeof *connector, GFP_KERNEL);
6372         if (!connector)
6373                 return NULL;
6374
6375         if (intel_connector_init(connector) < 0) {
6376                 kfree(connector);
6377                 return NULL;
6378         }
6379
6380         return connector;
6381 }
6382
6383 /*
6384  * Free the bits allocated by intel_connector_alloc.
6385  * This should only be used after intel_connector_alloc has returned
6386  * successfully, and before drm_connector_init returns successfully.
6387  * Otherwise the destroy callbacks for the connector and the state should
6388  * take care of proper cleanup/free
6389  */
void intel_connector_free(struct intel_connector *connector)
{
	/* Free the atomic state allocated by intel_connector_init() first. */
	kfree(to_intel_digital_connector_state(connector->base.state));
	kfree(connector);
}
6395
6396 /* Simple connector->get_hw_state implementation for encoders that support only
6397  * one connector and no cloning and hence the encoder state determines the state
6398  * of the connector. */
6399 bool intel_connector_get_hw_state(struct intel_connector *connector)
6400 {
6401         enum pipe pipe = 0;
6402         struct intel_encoder *encoder = connector->encoder;
6403
6404         return encoder->get_hw_state(encoder, &pipe);
6405 }
6406
6407 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6408 {
6409         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6410                 return crtc_state->fdi_lanes;
6411
6412         return 0;
6413 }
6414
/*
 * Validate the FDI lane count requested for @pipe against the platform's
 * lane budget. Returns 0 on success, -EINVAL when the configuration can't
 * be supported, or a PTR_ERR from acquiring another crtc's state.
 *
 * On IVB the FDI lanes are shared between pipes B and C, so those checks
 * may pull the other pipe's state into the atomic transaction.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute hardware maximum on any platform. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no lane sharing to worry about. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B steals pipe C's lanes; C must then be idle. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* C can only run if B leaves at least 2 lanes free. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6486
/* Sentinel return value: caller must recompute the config with reduced bpp. */
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/*
	 * If the lane budget is exceeded, keep dropping 2 bits per channel
	 * (6 bpc minimum) and retrying until it fits.
	 */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6532
/*
 * Check whether @crtc_state could ever run with IPS (Intermediate Pixel
 * Storage) enabled. This is the static capability test; the per-commit
 * decision is made in hsw_compute_ips_config().
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	/* Respect the module-parameter kill switch. */
	if (!i915_modparams.enable_ips)
		return false;

	/* IPS can't cope with more than 8 bits per channel. */
	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
6561
/*
 * Decide whether IPS should actually be enabled for this particular
 * commit, given the capability check, user overrides, plane state and
 * the cdclk chosen for the atomic state.
 */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return false;

	/* Honor an explicit userspace/debug request to keep IPS off. */
	if (crtc_state->ips_force_disable)
		return false;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return false;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
		return false;

	return true;
}
6586
6587 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6588 {
6589         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6590
6591         /* GDG double wide on either pipe, otherwise pipe A only */
6592         return INTEL_GEN(dev_priv) < 4 &&
6593                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6594 }
6595
/*
 * Compute the effective pipe pixel rate (kHz) on ILK+, scaling the mode
 * clock up when the PCH panel fitter downscales: the pipe has to fetch
 * more pixels per output pixel in that case.
 */
static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (pipe_config->pch_pfit.enabled) {
		/* 64-bit locals so the products below can't overflow. */
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		/* pch_pfit.size packs width in the high and height in the low 16 bits. */
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		/* Only upscaling matters for the ratio; clamp at the pfit size. */
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		/* Guard the division below against a zero pfit dimension. */
		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
6630
6631 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6632 {
6633         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6634
6635         if (HAS_GMCH_DISPLAY(dev_priv))
6636                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6637                 crtc_state->pixel_rate =
6638                         crtc_state->base.adjusted_mode.crtc_clock;
6639         else
6640                 crtc_state->pixel_rate =
6641                         ilk_pipe_pixel_rate(crtc_state);
6642 }
6643
/*
 * Validate and finish computing a crtc configuration: dotclock limits
 * (including double-wide handling on pre-gen4), YCBCR420/CTM exclusivity,
 * even-width constraints, the Cantiga+ hsync workaround, pixel rate,
 * and finally FDI bandwidth when a PCH encoder is involved.
 *
 * Returns 0 on success, -EINVAL for an unsupportable mode, or RETRY from
 * the FDI path when the caller must recompute with reduced bpp.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4 pipes are normally limited to 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6716
6717 static void
6718 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
6719 {
6720         while (*num > DATA_LINK_M_N_MASK ||
6721                *den > DATA_LINK_M_N_MASK) {
6722                 *num >>= 1;
6723                 *den >>= 1;
6724         }
6725 }
6726
/*
 * Compute an M/N divider pair approximating m/n, scaled so N is a power
 * of two (capped at DATA_LINK_N_MAX) and both values fit the register
 * field width.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n,
			bool reduce_m_n)
{
	/*
	 * Reduce M/N as much as possible without loss in precision. Several DP
	 * dongles in particular seem to be fussy about too large *link* M/N
	 * values. The passed in values are more likely to have the least
	 * significant bits zero than M after rounding below, so do this first.
	 */
	if (reduce_m_n) {
		while ((m & 1) == 0 && (n & 1) == 0) {
			m >>= 1;
			n >>= 1;
		}
	}

	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	/* Scale M by the same factor applied to N, rounding in 64 bits. */
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
6748
/*
 * Fill in @m_n with the data (gmch) and link M/N values for a display
 * link: data M/N is the ratio of pixel bandwidth to total link bandwidth
 * (8b/10b symbols, hence the *8), link M/N is the clock ratio.
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool reduce_m_n)
{
	/* Fixed transfer unit size used by the hardware. */
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    reduce_m_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    reduce_m_n);
}
6766
6767 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6768 {
6769         if (i915_modparams.panel_use_ssc >= 0)
6770                 return i915_modparams.panel_use_ssc != 0;
6771         return dev_priv->vbt.lvds_use_ssc
6772                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6773 }
6774
6775 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
6776 {
6777         return (1 << dpll->n) << 16 | dpll->m2;
6778 }
6779
6780 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6781 {
6782         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
6783 }
6784
6785 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6786                                      struct intel_crtc_state *crtc_state,
6787                                      struct dpll *reduced_clock)
6788 {
6789         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6790         u32 fp, fp2 = 0;
6791
6792         if (IS_PINEVIEW(dev_priv)) {
6793                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
6794                 if (reduced_clock)
6795                         fp2 = pnv_dpll_compute_fp(reduced_clock);
6796         } else {
6797                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
6798                 if (reduced_clock)
6799                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
6800         }
6801
6802         crtc_state->dpll_hw_state.fp0 = fp;
6803
6804         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6805             reduced_clock) {
6806                 crtc_state->dpll_hw_state.fp1 = fp2;
6807         } else {
6808                 crtc_state->dpll_hw_state.fp1 = fp;
6809         }
6810 }
6811
/*
 * Recalibrate the PLLB opamp via the VLV DPIO sideband.
 *
 * The magic constants below are DPIO register values taken from the
 * hardware programming sequence; only the documented bit fields are
 * modified while the rest of each register is preserved.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the override again once calibration has been forced. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6840
/* Program the PCH transcoder data/link M1/N1 registers for @crtc. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	/* The TU size is packed into the high bits of DATA_M1. */
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
6853
/*
 * Program the CPU transcoder M/N registers. Gen5+ addresses them per
 * transcoder (with optional M2/N2 for DRRS); older G4X-class hardware
 * addresses them per pipe and has no second set.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
6886
/*
 * Program the DP M/N values selected by @m_n (M1_N1 or M2_N2) into the
 * appropriate transcoder registers for @crtc.
 *
 * NOTE(review): the PCH path always programs dp_m_n regardless of the
 * requested divider set — presumably intentional since PCH transcoders
 * have no M2/N2 registers; confirm against the DRRS users of M2_N2.
 */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
6911
/* Compute the DPLL register values for Valleyview into @pipe_config. */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Pipes other than A additionally need the CRI clock. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
6928
6929 static void chv_compute_dpll(struct intel_crtc *crtc,
6930                              struct intel_crtc_state *pipe_config)
6931 {
6932         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
6933                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
6934         if (crtc->pipe != PIPE_A)
6935                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6936
6937         /* DPLL not used with DSI, but still need the rest set up */
6938         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
6939                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
6940
6941         pipe_config->dpll_hw_state.dpll_md =
6942                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6943 }
6944
6945 static void vlv_prepare_pll(struct intel_crtc *crtc,
6946                             const struct intel_crtc_state *pipe_config)
6947 {
6948         struct drm_device *dev = crtc->base.dev;
6949         struct drm_i915_private *dev_priv = to_i915(dev);
6950         enum pipe pipe = crtc->pipe;
6951         u32 mdiv;
6952         u32 bestn, bestm1, bestm2, bestp1, bestp2;
6953         u32 coreclk, reg_val;
6954
6955         /* Enable Refclk */
6956         I915_WRITE(DPLL(pipe),
6957                    pipe_config->dpll_hw_state.dpll &
6958                    ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
6959
6960         /* No need to actually set up the DPLL with DSI */
6961         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6962                 return;
6963
6964         mutex_lock(&dev_priv->sb_lock);
6965
6966         bestn = pipe_config->dpll.n;
6967         bestm1 = pipe_config->dpll.m1;
6968         bestm2 = pipe_config->dpll.m2;
6969         bestp1 = pipe_config->dpll.p1;
6970         bestp2 = pipe_config->dpll.p2;
6971
6972         /* See eDP HDMI DPIO driver vbios notes doc */
6973
6974         /* PLL B needs special handling */
6975         if (pipe == PIPE_B)
6976                 vlv_pllb_recal_opamp(dev_priv, pipe);
6977
6978         /* Set up Tx target for periodic Rcomp update */
6979         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
6980
6981         /* Disable target IRef on PLL */
6982         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
6983         reg_val &= 0x00ffffff;
6984         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
6985
6986         /* Disable fast lock */
6987         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
6988
6989         /* Set idtafcrecal before PLL is enabled */
6990         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
6991         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
6992         mdiv |= ((bestn << DPIO_N_SHIFT));
6993         mdiv |= (1 << DPIO_K_SHIFT);
6994
6995         /*
6996          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
6997          * but we don't support that).
6998          * Note: don't use the DAC post divider as it seems unstable.
6999          */
7000         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7001         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7002
7003         mdiv |= DPIO_ENABLE_CALIBRATION;
7004         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7005
7006         /* Set HBR and RBR LPF coefficients */
7007         if (pipe_config->port_clock == 162000 ||
7008             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
7009             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
7010                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7011                                  0x009f0003);
7012         else
7013                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7014                                  0x00d0000f);
7015
7016         if (intel_crtc_has_dp_encoder(pipe_config)) {
7017                 /* Use SSC source */
7018                 if (pipe == PIPE_A)
7019                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7020                                          0x0df40000);
7021                 else
7022                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7023                                          0x0df70000);
7024         } else { /* HDMI or VGA */
7025                 /* Use bend source */
7026                 if (pipe == PIPE_A)
7027                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7028                                          0x0df70000);
7029                 else
7030                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7031                                          0x0df40000);
7032         }
7033
7034         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7035         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7036         if (intel_crtc_has_dp_encoder(crtc->config))
7037                 coreclk |= 0x01000000;
7038         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7039
7040         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7041         mutex_unlock(&dev_priv->sb_lock);
7042 }
7043
/*
 * Program the CHV DPIO PLL dividers, loop filter and lock-detect settings
 * for @pipe_config prior to enabling the PLL. The DPIO write sequence below
 * follows the required hardware programming order; do not reorder.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 loopfilter, tribuf_calcntr;
        u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
        u32 dpio_val;
        int vco;

        /* Enable Refclk and SSC */
        I915_WRITE(DPLL(pipe),
                   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

        /* No need to actually set up the DPLL with DSI */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* m2 carries a 22-bit fractional part; split it into int/frac */
        bestn = pipe_config->dpll.n;
        bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2 >> 22;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;
        vco = pipe_config->dpll.vco;
        dpio_val = 0;
        loopfilter = 0;

        /* Sideband accesses must be serialized */
        mutex_lock(&dev_priv->sb_lock);

        /* p1 and p2 divider */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
                        5 << DPIO_CHV_S1_DIV_SHIFT |
                        bestp1 << DPIO_CHV_P1_DIV_SHIFT |
                        bestp2 << DPIO_CHV_P2_DIV_SHIFT |
                        1 << DPIO_CHV_K_DIV_SHIFT);

        /* Feedback post-divider - m2 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

        /* Feedback refclk divider - n and m1 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
                        DPIO_CHV_M1_DIV_BY_2 |
                        1 << DPIO_CHV_N_DIV_SHIFT);

        /* M2 fraction division */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

        /* M2 fraction division enable */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
        dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
        if (bestm2_frac)
                dpio_val |= DPIO_CHV_FRAC_DIV_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

        /* Program digital lock detect threshold */
        /* coarse lock detection only when no fractional part is in use */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
        dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
                                        DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
        dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
        if (!bestm2_frac)
                dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

        /* Loop filter */
        /* coefficients and tribuf target count are keyed off the VCO range */
        if (vco == 5400000) {
                loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x9;
        } else if (vco <= 6200000) {
                loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x9;
        } else if (vco <= 6480000) {
                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x8;
        } else {
                /* Not supported. Apply the same limits as in the max case */
                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0;
        }
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
        dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
        dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

        /* AFC Recal */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
                        vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
                        DPIO_AFC_RECAL);

        mutex_unlock(&dev_priv->sb_lock);
}
7148
7149 /**
7150  * vlv_force_pll_on - forcibly enable just the PLL
7151  * @dev_priv: i915 private structure
7152  * @pipe: pipe PLL to enable
7153  * @dpll: PLL configuration
7154  *
7155  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7156  * in cases where we need the PLL enabled even when @pipe is not going to
7157  * be enabled.
7158  */
7159 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7160                      const struct dpll *dpll)
7161 {
7162         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7163         struct intel_crtc_state *pipe_config;
7164
7165         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7166         if (!pipe_config)
7167                 return -ENOMEM;
7168
7169         pipe_config->base.crtc = &crtc->base;
7170         pipe_config->pixel_multiplier = 1;
7171         pipe_config->dpll = *dpll;
7172
7173         if (IS_CHERRYVIEW(dev_priv)) {
7174                 chv_compute_dpll(crtc, pipe_config);
7175                 chv_prepare_pll(crtc, pipe_config);
7176                 chv_enable_pll(crtc, pipe_config);
7177         } else {
7178                 vlv_compute_dpll(crtc, pipe_config);
7179                 vlv_prepare_pll(crtc, pipe_config);
7180                 vlv_enable_pll(crtc, pipe_config);
7181         }
7182
7183         kfree(pipe_config);
7184
7185         return 0;
7186 }
7187
7188 /**
7189  * vlv_force_pll_off - forcibly disable just the PLL
7190  * @dev_priv: i915 private structure
7191  * @pipe: pipe PLL to disable
7192  *
7193  * Disable the PLL for @pipe. To be used in cases where we need
7194  * the PLL enabled even when @pipe is not going to be enabled.
7195  */
7196 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7197 {
7198         if (IS_CHERRYVIEW(dev_priv))
7199                 chv_disable_pll(dev_priv, pipe);
7200         else
7201                 vlv_disable_pll(dev_priv, pipe);
7202 }
7203
7204 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7205                               struct intel_crtc_state *crtc_state,
7206                               struct dpll *reduced_clock)
7207 {
7208         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7209         u32 dpll;
7210         struct dpll *clock = &crtc_state->dpll;
7211
7212         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7213
7214         dpll = DPLL_VGA_MODE_DIS;
7215
7216         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7217                 dpll |= DPLLB_MODE_LVDS;
7218         else
7219                 dpll |= DPLLB_MODE_DAC_SERIAL;
7220
7221         if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7222             IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7223                 dpll |= (crtc_state->pixel_multiplier - 1)
7224                         << SDVO_MULTIPLIER_SHIFT_HIRES;
7225         }
7226
7227         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7228             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7229                 dpll |= DPLL_SDVO_HIGH_SPEED;
7230
7231         if (intel_crtc_has_dp_encoder(crtc_state))
7232                 dpll |= DPLL_SDVO_HIGH_SPEED;
7233
7234         /* compute bitmask from p1 value */
7235         if (IS_PINEVIEW(dev_priv))
7236                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7237         else {
7238                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7239                 if (IS_G4X(dev_priv) && reduced_clock)
7240                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7241         }
7242         switch (clock->p2) {
7243         case 5:
7244                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7245                 break;
7246         case 7:
7247                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7248                 break;
7249         case 10:
7250                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7251                 break;
7252         case 14:
7253                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7254                 break;
7255         }
7256         if (INTEL_GEN(dev_priv) >= 4)
7257                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7258
7259         if (crtc_state->sdvo_tv_clock)
7260                 dpll |= PLL_REF_INPUT_TVCLKINBC;
7261         else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7262                  intel_panel_use_ssc(dev_priv))
7263                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7264         else
7265                 dpll |= PLL_REF_INPUT_DREFCLK;
7266
7267         dpll |= DPLL_VCO_ENABLE;
7268         crtc_state->dpll_hw_state.dpll = dpll;
7269
7270         if (INTEL_GEN(dev_priv) >= 4) {
7271                 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7272                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7273                 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7274         }
7275 }
7276
7277 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7278                               struct intel_crtc_state *crtc_state,
7279                               struct dpll *reduced_clock)
7280 {
7281         struct drm_device *dev = crtc->base.dev;
7282         struct drm_i915_private *dev_priv = to_i915(dev);
7283         u32 dpll;
7284         struct dpll *clock = &crtc_state->dpll;
7285
7286         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7287
7288         dpll = DPLL_VGA_MODE_DIS;
7289
7290         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7291                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7292         } else {
7293                 if (clock->p1 == 2)
7294                         dpll |= PLL_P1_DIVIDE_BY_TWO;
7295                 else
7296                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7297                 if (clock->p2 == 4)
7298                         dpll |= PLL_P2_DIVIDE_BY_4;
7299         }
7300
7301         if (!IS_I830(dev_priv) &&
7302             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7303                 dpll |= DPLL_DVO_2X_MODE;
7304
7305         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7306             intel_panel_use_ssc(dev_priv))
7307                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7308         else
7309                 dpll |= PLL_REF_INPUT_DREFCLK;
7310
7311         dpll |= DPLL_VCO_ENABLE;
7312         crtc_state->dpll_hw_state.dpll = dpll;
7313 }
7314
/*
 * Program the transcoder H/V timing registers (HTOTAL/HBLANK/HSYNC,
 * VTOTAL/VBLANK/VSYNC and VSYNCSHIFT) from the CRTC's adjusted mode.
 * All registers encode (value - 1); start values live in the low 16 bits,
 * end/total values in the high 16 bits.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        enum pipe pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
        uint32_t crtc_vtotal, crtc_vblank_end;
        int vsyncshift = 0;

        /* We need to be careful not to changed the adjusted mode, for otherwise
         * the hw state checker will get angry at the mismatch. */
        crtc_vtotal = adjusted_mode->crtc_vtotal;
        crtc_vblank_end = adjusted_mode->crtc_vblank_end;

        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                /* the chip adds 2 halflines automatically */
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;

                /* SDVO needs the sync shifted by half a line; others by the
                 * hsync offset from mid-line (wrapped to stay non-negative) */
                if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
                                adjusted_mode->crtc_htotal / 2;
                if (vsyncshift < 0)
                        vsyncshift += adjusted_mode->crtc_htotal;
        }

        /* VSYNCSHIFT only exists on gen4+ */
        if (INTEL_GEN(dev_priv) > 3)
                I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

        I915_WRITE(HTOTAL(cpu_transcoder),
                   (adjusted_mode->crtc_hdisplay - 1) |
                   ((adjusted_mode->crtc_htotal - 1) << 16));
        I915_WRITE(HBLANK(cpu_transcoder),
                   (adjusted_mode->crtc_hblank_start - 1) |
                   ((adjusted_mode->crtc_hblank_end - 1) << 16));
        I915_WRITE(HSYNC(cpu_transcoder),
                   (adjusted_mode->crtc_hsync_start - 1) |
                   ((adjusted_mode->crtc_hsync_end - 1) << 16));

        I915_WRITE(VTOTAL(cpu_transcoder),
                   (adjusted_mode->crtc_vdisplay - 1) |
                   ((crtc_vtotal - 1) << 16));
        I915_WRITE(VBLANK(cpu_transcoder),
                   (adjusted_mode->crtc_vblank_start - 1) |
                   ((crtc_vblank_end - 1) << 16));
        I915_WRITE(VSYNC(cpu_transcoder),
                   (adjusted_mode->crtc_vsync_start - 1) |
                   ((adjusted_mode->crtc_vsync_end - 1) << 16));

        /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
         * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
         * documented on the DDI_FUNC_CTL register description, EDP Input Select
         * bits. */
        if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
            (pipe == PIPE_B || pipe == PIPE_C))
                I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7375
7376 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7377 {
7378         struct drm_device *dev = intel_crtc->base.dev;
7379         struct drm_i915_private *dev_priv = to_i915(dev);
7380         enum pipe pipe = intel_crtc->pipe;
7381
7382         /* pipesrc controls the size that is scaled from, which should
7383          * always be the user's requested size.
7384          */
7385         I915_WRITE(PIPESRC(pipe),
7386                    ((intel_crtc->config->pipe_src_w - 1) << 16) |
7387                    (intel_crtc->config->pipe_src_h - 1));
7388 }
7389
7390 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7391                                    struct intel_crtc_state *pipe_config)
7392 {
7393         struct drm_device *dev = crtc->base.dev;
7394         struct drm_i915_private *dev_priv = to_i915(dev);
7395         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7396         uint32_t tmp;
7397
7398         tmp = I915_READ(HTOTAL(cpu_transcoder));
7399         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7400         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7401         tmp = I915_READ(HBLANK(cpu_transcoder));
7402         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7403         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7404         tmp = I915_READ(HSYNC(cpu_transcoder));
7405         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7406         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7407
7408         tmp = I915_READ(VTOTAL(cpu_transcoder));
7409         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7410         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7411         tmp = I915_READ(VBLANK(cpu_transcoder));
7412         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7413         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7414         tmp = I915_READ(VSYNC(cpu_transcoder));
7415         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7416         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7417
7418         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7419                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7420                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7421                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7422         }
7423 }
7424
7425 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7426                                     struct intel_crtc_state *pipe_config)
7427 {
7428         struct drm_device *dev = crtc->base.dev;
7429         struct drm_i915_private *dev_priv = to_i915(dev);
7430         u32 tmp;
7431
7432         tmp = I915_READ(PIPESRC(crtc->pipe));
7433         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7434         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7435
7436         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7437         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7438 }
7439
7440 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7441                                  struct intel_crtc_state *pipe_config)
7442 {
7443         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7444         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7445         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7446         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7447
7448         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7449         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7450         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7451         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7452
7453         mode->flags = pipe_config->base.adjusted_mode.flags;
7454         mode->type = DRM_MODE_TYPE_DRIVER;
7455
7456         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7457
7458         mode->hsync = drm_mode_hsync(mode);
7459         mode->vrefresh = drm_mode_vrefresh(mode);
7460         drm_mode_set_name(mode);
7461 }
7462
7463 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7464 {
7465         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
7466         uint32_t pipeconf;
7467
7468         pipeconf = 0;
7469
7470         /* we keep both pipes enabled on 830 */
7471         if (IS_I830(dev_priv))
7472                 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7473
7474         if (intel_crtc->config->double_wide)
7475                 pipeconf |= PIPECONF_DOUBLE_WIDE;
7476
7477         /* only g4x and later have fancy bpc/dither controls */
7478         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7479             IS_CHERRYVIEW(dev_priv)) {
7480                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7481                 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7482                         pipeconf |= PIPECONF_DITHER_EN |
7483                                     PIPECONF_DITHER_TYPE_SP;
7484
7485                 switch (intel_crtc->config->pipe_bpp) {
7486                 case 18:
7487                         pipeconf |= PIPECONF_6BPC;
7488                         break;
7489                 case 24:
7490                         pipeconf |= PIPECONF_8BPC;
7491                         break;
7492                 case 30:
7493                         pipeconf |= PIPECONF_10BPC;
7494                         break;
7495                 default:
7496                         /* Case prevented by intel_choose_pipe_bpp_dither. */
7497                         BUG();
7498                 }
7499         }
7500
7501         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7502                 if (INTEL_GEN(dev_priv) < 4 ||
7503                     intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
7504                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7505                 else
7506                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7507         } else
7508                 pipeconf |= PIPECONF_PROGRESSIVE;
7509
7510         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7511              intel_crtc->config->limited_color_range)
7512                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7513
7514         I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7515         POSTING_READ(PIPECONF(intel_crtc->pipe));
7516 }
7517
7518 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7519                                    struct intel_crtc_state *crtc_state)
7520 {
7521         struct drm_device *dev = crtc->base.dev;
7522         struct drm_i915_private *dev_priv = to_i915(dev);
7523         const struct intel_limit *limit;
7524         int refclk = 48000;
7525
7526         memset(&crtc_state->dpll_hw_state, 0,
7527                sizeof(crtc_state->dpll_hw_state));
7528
7529         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7530                 if (intel_panel_use_ssc(dev_priv)) {
7531                         refclk = dev_priv->vbt.lvds_ssc_freq;
7532                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7533                 }
7534
7535                 limit = &intel_limits_i8xx_lvds;
7536         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7537                 limit = &intel_limits_i8xx_dvo;
7538         } else {
7539                 limit = &intel_limits_i8xx_dac;
7540         }
7541
7542         if (!crtc_state->clock_set &&
7543             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7544                                  refclk, NULL, &crtc_state->dpll)) {
7545                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7546                 return -EINVAL;
7547         }
7548
7549         i8xx_compute_dpll(crtc, crtc_state, NULL);
7550
7551         return 0;
7552 }
7553
7554 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7555                                   struct intel_crtc_state *crtc_state)
7556 {
7557         struct drm_device *dev = crtc->base.dev;
7558         struct drm_i915_private *dev_priv = to_i915(dev);
7559         const struct intel_limit *limit;
7560         int refclk = 96000;
7561
7562         memset(&crtc_state->dpll_hw_state, 0,
7563                sizeof(crtc_state->dpll_hw_state));
7564
7565         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7566                 if (intel_panel_use_ssc(dev_priv)) {
7567                         refclk = dev_priv->vbt.lvds_ssc_freq;
7568                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7569                 }
7570
7571                 if (intel_is_dual_link_lvds(dev))
7572                         limit = &intel_limits_g4x_dual_channel_lvds;
7573                 else
7574                         limit = &intel_limits_g4x_single_channel_lvds;
7575         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7576                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7577                 limit = &intel_limits_g4x_hdmi;
7578         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7579                 limit = &intel_limits_g4x_sdvo;
7580         } else {
7581                 /* The option is for other outputs */
7582                 limit = &intel_limits_i9xx_sdvo;
7583         }
7584
7585         if (!crtc_state->clock_set &&
7586             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7587                                 refclk, NULL, &crtc_state->dpll)) {
7588                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7589                 return -EINVAL;
7590         }
7591
7592         i9xx_compute_dpll(crtc, crtc_state, NULL);
7593
7594         return 0;
7595 }
7596
7597 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7598                                   struct intel_crtc_state *crtc_state)
7599 {
7600         struct drm_device *dev = crtc->base.dev;
7601         struct drm_i915_private *dev_priv = to_i915(dev);
7602         const struct intel_limit *limit;
7603         int refclk = 96000;
7604
7605         memset(&crtc_state->dpll_hw_state, 0,
7606                sizeof(crtc_state->dpll_hw_state));
7607
7608         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7609                 if (intel_panel_use_ssc(dev_priv)) {
7610                         refclk = dev_priv->vbt.lvds_ssc_freq;
7611                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7612                 }
7613
7614                 limit = &intel_limits_pineview_lvds;
7615         } else {
7616                 limit = &intel_limits_pineview_sdvo;
7617         }
7618
7619         if (!crtc_state->clock_set &&
7620             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7621                                 refclk, NULL, &crtc_state->dpll)) {
7622                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7623                 return -EINVAL;
7624         }
7625
7626         i9xx_compute_dpll(crtc, crtc_state, NULL);
7627
7628         return 0;
7629 }
7630
7631 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7632                                    struct intel_crtc_state *crtc_state)
7633 {
7634         struct drm_device *dev = crtc->base.dev;
7635         struct drm_i915_private *dev_priv = to_i915(dev);
7636         const struct intel_limit *limit;
7637         int refclk = 96000;
7638
7639         memset(&crtc_state->dpll_hw_state, 0,
7640                sizeof(crtc_state->dpll_hw_state));
7641
7642         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7643                 if (intel_panel_use_ssc(dev_priv)) {
7644                         refclk = dev_priv->vbt.lvds_ssc_freq;
7645                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7646                 }
7647
7648                 limit = &intel_limits_i9xx_lvds;
7649         } else {
7650                 limit = &intel_limits_i9xx_sdvo;
7651         }
7652
7653         if (!crtc_state->clock_set &&
7654             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7655                                  refclk, NULL, &crtc_state->dpll)) {
7656                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7657                 return -EINVAL;
7658         }
7659
7660         i9xx_compute_dpll(crtc, crtc_state, NULL);
7661
7662         return 0;
7663 }
7664
7665 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7666                                   struct intel_crtc_state *crtc_state)
7667 {
7668         int refclk = 100000;
7669         const struct intel_limit *limit = &intel_limits_chv;
7670
7671         memset(&crtc_state->dpll_hw_state, 0,
7672                sizeof(crtc_state->dpll_hw_state));
7673
7674         if (!crtc_state->clock_set &&
7675             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7676                                 refclk, NULL, &crtc_state->dpll)) {
7677                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7678                 return -EINVAL;
7679         }
7680
7681         chv_compute_dpll(crtc, crtc_state);
7682
7683         return 0;
7684 }
7685
7686 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7687                                   struct intel_crtc_state *crtc_state)
7688 {
7689         int refclk = 100000;
7690         const struct intel_limit *limit = &intel_limits_vlv;
7691
7692         memset(&crtc_state->dpll_hw_state, 0,
7693                sizeof(crtc_state->dpll_hw_state));
7694
7695         if (!crtc_state->clock_set &&
7696             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7697                                 refclk, NULL, &crtc_state->dpll)) {
7698                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7699                 return -EINVAL;
7700         }
7701
7702         vlv_compute_dpll(crtc, crtc_state);
7703
7704         return 0;
7705 }
7706
/*
 * Read out the GMCH panel fitter state into @pipe_config, but only if
 * the fitter is enabled and actually attached to @crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t tmp;

	/* No panel fitter on i830 or gen2/3 non-mobile parts. */
	if (INTEL_GEN(dev_priv) <= 3 &&
	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4 the single fitter serves pipe B only. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* Gen4+ records the owning pipe in PFIT_CONTROL. */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
7733
/*
 * Read the currently programmed DPLL dividers for @crtc from DPIO and
 * compute the resulting port clock into @pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000; /* reference clock, in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO (sideband) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the M1/M2/N/P1/P2 divider fields from PLL_DW3. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
7760
/*
 * Reconstruct a framebuffer description from the primary plane's current
 * hardware state (format, tiling, stride, size, surface base), so the
 * BIOS-programmed framebuffer can be taken over. On any failure the
 * function simply returns, leaving @plane_config->fb unset.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is disabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Only X tiling is readable/relevant for these planes. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* Surface base/offset registers differ per platform generation. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* PIPESRC holds (size - 1) in each dimension. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	/*
	 * NOTE(review): 32-bit multiply — for a framebuffer extending past
	 * 4 GiB this overflows. Consider widening the computation (and
	 * plane_config->size if it is u32); verify the field's type.
	 */
	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
7836
/*
 * Read the currently programmed CHV DPLL divider registers via DPIO and
 * compute the resulting port clock into @pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* reference clock, in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO (sideband) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/* M2 is an integer part plus an optional 22-bit fraction. */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
7870
/*
 * Read out the full pipe hardware state for gmch platforms into
 * @pipe_config. Returns true if the pipe is enabled and the state was
 * read, false otherwise (pipe off or its power domain unavailable).
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	/* Hold the pipe power domain for the duration of the readout. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On these platforms pipe and transcoder are 1:1. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only G4X/VLV/CHV encode the pipe bpp in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Recover the pixel multiplier; its location varies by platform. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Derive port_clock from the dividers we just read out. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
7984
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) based on
 * which outputs exist and whether SSC is in use. The final register value
 * is computed up-front, then the individual clock sources are switched
 * one at a time, with a settle delay after each write — the ordering of
 * the writes is part of the hardware sequence and must not be changed.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* IBX may use an external CK505 clock chip (per VBT); with it SSC
	 * is only possible through that chip. Later PCHs always can SSC. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC alive for any DPLL still referencing it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the hardware already matches. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The step-by-step updates must converge on the precomputed value. */
	BUG_ON(val != final);
}
8151
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait for the status bit to latch, then de-assert and wait for the
 * status to clear. Timeouts are reported but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8172
/*
 * WaMPhyProgramming:hsw
 *
 * Program the FDI mPHY tuning registers over the MPHY sideband. The
 * addresses and values are opaque workaround magic; the registers appear
 * to come in 0x20xx/0x21xx pairs (presumably one per channel — not
 * documented here, do not reorder or "simplify").
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8247
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * All sideband accesses (SBI_ICLK) are serialized by sb_lock; the write
 * order and the 24us settle delay are part of the BSpec sequence.
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	uint32_t reg, tmp;

	/* Sanitize impossible parameter combinations rather than obeying them. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable SSC while keeping the clock path in bypass (PATHALT). */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Release the bypass so the spread clock is used. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable bit lives in a different register on LPT-LP. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8292
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* The buffer-enable bit lives in a different register on LPT-LP. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* If SSC is still running: first bypass the path, then disable. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8318
/* Map a bend amount in 5-step increments (-50..50) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE values for each supported clock bend, indexed via
 * BEND_IDX(). Adjacent 5-step entries share a value; the half-step is
 * realized through dithering in lpt_bend_clkout_dp().
 */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
8344
8345 /*
8346  * Bend CLKOUT_DP
8347  * steps -50 to 50 inclusive, in steps of 5
8348  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8349  * change in clock period = -(steps / 10) * 5.787 ps
8350  */
8351 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8352 {
8353         uint32_t tmp;
8354         int idx = BEND_IDX(steps);
8355
8356         if (WARN_ON(steps % 5 != 0))
8357                 return;
8358
8359         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8360                 return;
8361
8362         mutex_lock(&dev_priv->sb_lock);
8363
8364         if (steps % 10 != 0)
8365                 tmp = 0xAAAAAAAB;
8366         else
8367                 tmp = 0x00000000;
8368         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8369
8370         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8371         tmp &= 0xffff0000;
8372         tmp |= sscdivintphase[idx];
8373         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8374
8375         mutex_unlock(&dev_priv->sb_lock);
8376 }
8377
8378 #undef BEND_IDX
8379
8380 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8381 {
8382         struct intel_encoder *encoder;
8383         bool has_vga = false;
8384
8385         for_each_intel_encoder(&dev_priv->drm, encoder) {
8386                 switch (encoder->type) {
8387                 case INTEL_OUTPUT_ANALOG:
8388                         has_vga = true;
8389                         break;
8390                 default:
8391                         break;
8392                 }
8393         }
8394
8395         if (has_vga) {
8396                 lpt_bend_clkout_dp(dev_priv, 0);
8397                 lpt_enable_clkout_dp(dev_priv, true, true);
8398         } else {
8399                 lpt_disable_clkout_dp(dev_priv);
8400         }
8401 }
8402
8403 /*
8404  * Initialize reference clocks when the driver loads
8405  */
8406 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
8407 {
8408         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
8409                 ironlake_init_pch_refclk(dev_priv);
8410         else if (HAS_PCH_LPT(dev_priv))
8411                 lpt_init_pch_refclk(dev_priv);
8412 }
8413
/*
 * Program PIPECONF for an ILK-style pipe: bits-per-component, dithering,
 * interlace mode and RGB color range, then post the write.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	/* Map the total pipe bpp (3 components) to the PIPECONF BPC field. */
	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/* Select limited (16-235) vs. full RGB range on the pipe output. */
	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
8455
8456 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8457 {
8458         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8459         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8460         enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8461         u32 val = 0;
8462
8463         if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8464                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8465
8466         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8467                 val |= PIPECONF_INTERLACED_ILK;
8468         else
8469                 val |= PIPECONF_PROGRESSIVE;
8470
8471         I915_WRITE(PIPECONF(cpu_transcoder), val);
8472         POSTING_READ(PIPECONF(cpu_transcoder));
8473 }
8474
/*
 * Program PIPEMISC on BDW/SKL+: dither depth and enable, plus the
 * YCbCr 4:2:0 output path when the state requests it.  A no-op on HSW,
 * which has no PIPEMISC register.
 */
static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *config = intel_crtc->config;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 val = 0;

		/* Map the total pipe bpp to the dither depth field. */
		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		/* Route the pipe output through the YUV 4:2:0 blender. */
		if (config->ycbcr420) {
			val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
				PIPEMISC_YUV420_ENABLE |
				PIPEMISC_YUV420_MODE_FULL_BLEND;
		}

		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
	}
}
8514
8515 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8516 {
8517         /*
8518          * Account for spread spectrum to avoid
8519          * oversubscribing the link. Max center spread
8520          * is 2.5%; use 5% for safety's sake.
8521          */
8522         u32 bps = target_clock * bpp * 21 / 20;
8523         return DIV_ROUND_UP(bps, link_bw * 8);
8524 }
8525
8526 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8527 {
8528         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8529 }
8530
/*
 * Compute the ILK DPLL, FP0 and FP1 register values for @crtc_state and
 * stash them in crtc_state->dpll_hw_state.  @reduced_clock optionally
 * supplies a lower-power divider set for FP1; otherwise FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same CB tune criterion, applied to the reduced clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	/* Encode the P2 post divider. */
	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference clock: spread-spectrum input for SSC LVDS panels. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8632
/*
 * Compute clock state for an ILK CRTC: pick the dividers via the
 * appropriate limits table, fill dpll_hw_state, and reserve a shared
 * PCH DPLL.  Returns 0 on success, -EINVAL if no divider set or no
 * free PLL can be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 120000;	/* default reference clock, kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/*
	 * Choose the limits table: LVDS varies by single/dual link and
	 * by reference clock (SSC may override the 120 MHz default).
	 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Respect dividers already fixed by the caller (clock_set). */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
8687
/*
 * Read back the link M/N values and TU size from the PCH transcoder
 * registers into @m_n.  The TU size shares a register with DATA_M1,
 * hence the masked re-read.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* TU size field is stored as (size - 1) in the DATA_M1 register. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
8703
/*
 * Read back link M/N values from the CPU transcoder (gen5+) or the G4X
 * per-pipe registers (gen < 5) into @m_n.  On gen5-7 with DRRS, the
 * secondary M2/N2 set is optionally read into @m2_n2.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size field is stored as (size - 1). */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Gen4-class hardware: per-pipe (not per-transcoder) registers. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
8744
8745 void intel_dp_get_m_n(struct intel_crtc *crtc,
8746                       struct intel_crtc_state *pipe_config)
8747 {
8748         if (pipe_config->has_pch_encoder)
8749                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8750         else
8751                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
8752                                              &pipe_config->dp_m_n,
8753                                              &pipe_config->dp_m2_n2);
8754 }
8755
/* Read back the FDI link M/N values from the CPU transcoder (no M2/N2). */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
8762
/*
 * Find which SKL scaler (if any) is bound to this pipe's panel fitter
 * and record its position/size; update scaler_state accordingly.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	uint32_t ps_ctrl = 0;
	int id = -1;	/* -1 = no scaler attached to the pipe */
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		/* Enabled with an empty plane-select = bound to the pipe. */
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}
8792
/*
 * Reconstruct the framebuffer configuration the BIOS/GOP left on the
 * primary plane of @crtc (format, tiling, size, stride) so it can be
 * inherited at driver load.  Fills @plane_config; on unknown tiling the
 * partially-built fb is discarded and nothing is reported.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane is off. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* ICL widened the format field in PLANE_CTL. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* GLK/CNL+ moved the alpha mode into PLANE_COLOR_CTL. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hardware tiling bits into a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Surface address is 4K-aligned; low bits hold other state. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores (height - 1) << 16 | (width - 1). */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in tile/linear units; convert to bytes. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	/*
	 * NOTE(review): this is a 32-bit multiply; it looks like it could
	 * overflow for framebuffers extending past 4 GiB -- confirm the
	 * width of plane_config->size and widen the arithmetic if needed.
	 */
	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
8895
/*
 * Read back the ILK panel fitter state (enable, window position/size)
 * into @pipe_config.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev_priv)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
8919
8920 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
8921                                      struct intel_crtc_state *pipe_config)
8922 {
8923         struct drm_device *dev = crtc->base.dev;
8924         struct drm_i915_private *dev_priv = to_i915(dev);
8925         enum intel_display_power_domain power_domain;
8926         uint32_t tmp;
8927         bool ret;
8928
8929         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8930         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8931                 return false;
8932
8933         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8934         pipe_config->shared_dpll = NULL;
8935
8936         ret = false;
8937         tmp = I915_READ(PIPECONF(crtc->pipe));
8938         if (!(tmp & PIPECONF_ENABLE))
8939                 goto out;
8940
8941         switch (tmp & PIPECONF_BPC_MASK) {
8942         case PIPECONF_6BPC:
8943                 pipe_config->pipe_bpp = 18;
8944                 break;
8945         case PIPECONF_8BPC:
8946                 pipe_config->pipe_bpp = 24;
8947                 break;
8948         case PIPECONF_10BPC:
8949                 pipe_config->pipe_bpp = 30;
8950                 break;
8951         case PIPECONF_12BPC:
8952                 pipe_config->pipe_bpp = 36;
8953                 break;
8954         default:
8955                 break;
8956         }
8957
8958         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
8959                 pipe_config->limited_color_range = true;
8960
8961         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
8962                 struct intel_shared_dpll *pll;
8963                 enum intel_dpll_id pll_id;
8964
8965                 pipe_config->has_pch_encoder = true;
8966
8967                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
8968                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8969                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
8970
8971                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
8972
8973                 if (HAS_PCH_IBX(dev_priv)) {
8974                         /*
8975                          * The pipe->pch transcoder and pch transcoder->pll
8976                          * mapping is fixed.
8977                          */
8978                         pll_id = (enum intel_dpll_id) crtc->pipe;
8979                 } else {
8980                         tmp = I915_READ(PCH_DPLL_SEL);
8981                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
8982                                 pll_id = DPLL_ID_PCH_PLL_B;
8983                         else
8984                                 pll_id= DPLL_ID_PCH_PLL_A;
8985                 }
8986
8987                 pipe_config->shared_dpll =
8988                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
8989                 pll = pipe_config->shared_dpll;
8990
8991                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
8992                                                 &pipe_config->dpll_hw_state));
8993
8994                 tmp = pipe_config->dpll_hw_state.dpll;
8995                 pipe_config->pixel_multiplier =
8996                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
8997                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
8998
8999                 ironlake_pch_clock_get(crtc, pipe_config);
9000         } else {
9001                 pipe_config->pixel_multiplier = 1;
9002         }
9003
9004         intel_get_pipe_timings(crtc, pipe_config);
9005         intel_get_pipe_src_size(crtc, pipe_config);
9006
9007         ironlake_get_pfit_config(crtc, pipe_config);
9008
9009         ret = true;
9010
9011 out:
9012         intel_display_power_put(dev_priv, power_domain);
9013
9014         return ret;
9015 }
9016
/*
 * Sanity-check that everything clocked by the LCPLL is already shut
 * down (CRTCs, power well, PLLs, panel power, PWMs, utility pin, GTC,
 * IRQs) before the LCPLL itself is disabled.  Violations only WARN.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* Only Haswell has a second CPU backlight PWM. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9051
9052 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9053 {
9054         if (IS_HASWELL(dev_priv))
9055                 return I915_READ(D_COMP_HSW);
9056         else
9057                 return I915_READ(D_COMP_BDW);
9058 }
9059
/*
 * Write the D_COMP register.  On HSW this must go through a pcode
 * mailbox command (under the pcu lock); on BDW it is a plain posted
 * MMIO write.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	if (IS_HASWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->pcu_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
9073
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 *
 * @switch_to_fclk: move CDCLK to FCLK before disabling the PLL
 * @allow_power_down: set LCPLL_POWER_DOWN_ALLOW after disabling
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Move the CD clock onto FCLK so it survives the PLL going down. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait for the PLL to report unlocked. */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the display voltage compensation (D_COMP). */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9125
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.  Inverse of hsw_disable_lcpll(); a no-op if the PLL is already
 * locked with no power-down/FCLK bits set.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already fully up: locked, enabled, on LCPLL, power-down disallowed. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP, forcing compensation on. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move the CD clock back off FCLK onto the now-running LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/* CDCLK may have changed across the PLL cycle; re-read it. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9181
9182 /*
9183  * Package states C8 and deeper are really deep PC states that can only be
9184  * reached when all the devices on the system allow it, so even if the graphics
9185  * device allows PC8+, it doesn't mean the system will actually get to these
9186  * states. Our driver only allows PC8+ when going into runtime PM.
9187  *
9188  * The requirements for PC8+ are that all the outputs are disabled, the power
9189  * well is disabled and most interrupts are disabled, and these are also
9190  * requirements for runtime PM. When these conditions are met, we manually do
9191  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9193  * hang the machine.
9194  *
9195  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9196  * the state of some registers, so when we come back from PC8+ we need to
9197  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9198  * need to take care of the registers kept by RC6. Notice that this happens even
9199  * if we don't put the device in PCI D3 state (which is what currently happens
9200  * because of the runtime PM support).
9201  *
9202  * For more, read "Display Sequences for Package C8" on the hardware
9203  * documentation.
9204  */
/*
 * Allow the platform to enter package C8+: let the LPT-LP PCH power down its
 * clock partitions, stop the DP clkout and disable the LCPLL (switching the
 * CD clock to Fclk). See the "Display Sequences for Package C8" comment above.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		/* Allow the PCH to power down its clock partitions. */
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}
9220
/*
 * Undo hsw_enable_pc8(): bring the LCPLL back up, reinitialize the PCH
 * refclk, and on LPT-LP forbid the PCH clock partitions from powering down.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		/* Mirror of the enable path: disallow partition power down. */
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
9236
/*
 * Reserve a shared DPLL for the encoder driving this CRTC as part of the
 * atomic check phase. DSI outputs are skipped here (their PLL is handled
 * separately). Returns 0 on success, -EINVAL if no suitable PLL was found.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}
9256
/*
 * Read back which shared DPLL currently drives @port (CNL) and record it in
 * @pipe_config. Leaves shared_dpll untouched if the register readout yields
 * an out-of-range PLL id.
 */
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
				   enum port port,
				   struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* The per-port clock select field encodes the DPLL id directly. */
	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9272
/*
 * Read back which PLL drives @port (ICL) and record it in @pipe_config.
 * Ports A/B (combo PHY) read the selection from DPCLKA_CFGCR0_ICL; ports
 * C-F each have a fixed MG PLL assignment. Does nothing on readout of an
 * unexpected PLL id or an unknown port.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* TODO: TBT pll not implemented. */
	switch (port) {
	case PORT_A:
	case PORT_B:
		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

		if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
			return;
		break;
	case PORT_C:
		id = DPLL_ID_ICL_MGPLL1;
		break;
	case PORT_D:
		id = DPLL_ID_ICL_MGPLL2;
		break;
	case PORT_E:
		id = DPLL_ID_ICL_MGPLL3;
		break;
	case PORT_F:
		id = DPLL_ID_ICL_MGPLL4;
		break;
	default:
		MISSING_CASE(port);
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9310
9311 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9312                                 enum port port,
9313                                 struct intel_crtc_state *pipe_config)
9314 {
9315         enum intel_dpll_id id;
9316
9317         switch (port) {
9318         case PORT_A:
9319                 id = DPLL_ID_SKL_DPLL0;
9320                 break;
9321         case PORT_B:
9322                 id = DPLL_ID_SKL_DPLL1;
9323                 break;
9324         case PORT_C:
9325                 id = DPLL_ID_SKL_DPLL2;
9326                 break;
9327         default:
9328                 DRM_ERROR("Incorrect port type\n");
9329                 return;
9330         }
9331
9332         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9333 }
9334
/*
 * Read back which shared DPLL drives @port (SKL/KBL) from DPLL_CTRL2 and
 * record it in @pipe_config. The shift "port * 3 + 1" extracts the 2-bit
 * clock-select field within the per-port 3-bit group. Does nothing if the
 * readout yields an out-of-range PLL id.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	id = temp >> (port * 3 + 1);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9350
/*
 * Read back which PLL is selected for @port (HSW/BDW) from PORT_CLK_SEL and
 * record it in @pipe_config. PORT_CLK_SEL_NONE and unknown selections leave
 * shared_dpll untouched (the latter with a MISSING_CASE warning first).
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9386
/*
 * Determine which transcoder feeds this pipe and whether it is enabled.
 * Records the transcoder in pipe_config->cpu_transcoder. Every power domain
 * successfully acquired is OR'ed into *power_domain_mask; the caller is
 * responsible for releasing those references. Returns true if the pipe's
 * transcoder is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		/* The eDP transcoder overrides the default mapping for its pipe. */
		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9438
/*
 * Check whether one of the DSI transcoders (ports A/C on gen9 LP) drives
 * this pipe. On a match pipe_config->cpu_transcoder is set to the DSI
 * transcoder. Acquired power domains are OR'ed into *power_domain_mask for
 * the caller to release. Returns true if the pipe is driven by DSI.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Only claim the transcoder if it is wired to our pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9486
/*
 * Read out the DDI port state for this transcoder: which port it drives,
 * which shared DPLL feeds it (dispatched per platform), and — on HSW/BDW
 * only — whether the PCH/FDI path is in use.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_ICELAKE(dev_priv))
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* Cross-check the readout against the PLL's own hw state. */
	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9532
/*
 * Read out the current hardware state of the pipe into @pipe_config.
 * Returns true if the pipe is active. Power domains are grabbed with
 * get_if_enabled as needed, recorded in power_domain_mask, and all released
 * again at "out:" before returning.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* On gen9 LP the pipe may instead be driven by a DSI transcoder. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	/* DSI pipes get their timings from the DSI code, not the DDI port. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
		bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;

		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
			bool blend_mode_420 = tmp &
					      PIPEMISC_YUV420_MODE_FULL_BLEND;

			/* A sane 4:2:0 config has all three bits agreeing. */
			pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
			if (pipe_config->ycbcr420 != clrspace_yuv ||
			    pipe_config->ycbcr420 != blend_mode_420)
				DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
		} else if (clrspace_yuv) {
			DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
		}
	}

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
9624
/*
 * Compute the base address to program into the cursor CURBASE register:
 * either the physical bus address (platforms that need the cursor in
 * physically contiguous memory) or the GGTT offset, plus the plane's
 * surface offset. For 180° rotation on GMCH platforms the base is advanced
 * to the last pixel of the buffer so the hw scans it out backwards.
 */
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 base;

	if (INTEL_INFO(dev_priv)->cursor_needs_physical)
		base = obj->phys_handle->busaddr;
	else
		base = intel_plane_ggtt_offset(plane_state);

	base += plane_state->color_plane[0].offset;

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
		base += (plane_state->base.crtc_h *
			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];

	return base;
}
9648
9649 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9650 {
9651         int x = plane_state->base.crtc_x;
9652         int y = plane_state->base.crtc_y;
9653         u32 pos = 0;
9654
9655         if (x < 0) {
9656                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9657                 x = -x;
9658         }
9659         pos |= x << CURSOR_X_SHIFT;
9660
9661         if (y < 0) {
9662                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9663                 y = -y;
9664         }
9665         pos |= y << CURSOR_Y_SHIFT;
9666
9667         return pos;
9668 }
9669
9670 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9671 {
9672         const struct drm_mode_config *config =
9673                 &plane_state->base.plane->dev->mode_config;
9674         int width = plane_state->base.crtc_w;
9675         int height = plane_state->base.crtc_h;
9676
9677         return width > 0 && width <= config->cursor_width &&
9678                 height > 0 && height <= config->cursor_height;
9679 }
9680
/*
 * Compute the cursor plane's view, stride and surface offset. The cursor
 * hardware cannot pan within the buffer, so after aligning the offset any
 * residual source x/y must be zero; otherwise -EINVAL is returned.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x, src_y;
	u32 offset;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	/* src coordinates are 16.16 fixed point; drop the fraction. */
	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->color_plane[0].offset = offset;

	return 0;
}
9707
9708 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
9709                               struct intel_plane_state *plane_state)
9710 {
9711         const struct drm_framebuffer *fb = plane_state->base.fb;
9712         int ret;
9713
9714         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
9715                 DRM_DEBUG_KMS("cursor cannot be tiled\n");
9716                 return -EINVAL;
9717         }
9718
9719         ret = drm_atomic_helper_check_plane_state(&plane_state->base,
9720                                                   &crtc_state->base,
9721                                                   DRM_PLANE_HELPER_NO_SCALING,
9722                                                   DRM_PLANE_HELPER_NO_SCALING,
9723                                                   true, true);
9724         if (ret)
9725                 return ret;
9726
9727         if (!plane_state->base.visible)
9728                 return 0;
9729
9730         ret = intel_plane_check_src_coordinates(plane_state);
9731         if (ret)
9732                 return ret;
9733
9734         ret = intel_cursor_check_surface(plane_state);
9735         if (ret)
9736                 return ret;
9737
9738         return 0;
9739 }
9740
/*
 * Maximum cursor stride in bytes on 845g/865g — matches the largest pitch
 * accepted by i845_check_cursor().
 */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
9748
9749 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9750                            const struct intel_plane_state *plane_state)
9751 {
9752         return CURSOR_ENABLE |
9753                 CURSOR_GAMMA_ENABLE |
9754                 CURSOR_FORMAT_ARGB |
9755                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
9756 }
9757
9758 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9759 {
9760         int width = plane_state->base.crtc_w;
9761
9762         /*
9763          * 845g/865g are only limited by the width of their cursors,
9764          * the height is arbitrary up to the precision of the register.
9765          */
9766         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
9767 }
9768
/*
 * Validate cursor plane state for 845g/865g: run the common checks, then
 * enforce the platform's size and stride restrictions and precompute the
 * CURCNTR value. A NULL fb (cursor being turned off) passes trivially.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The hw only supports a few power-of-two strides. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9810
/*
 * Program the 845g/865g cursor registers from the precomputed plane state.
 * Called with plane_state == NULL (or an invisible plane) to disable the
 * cursor. Runs under the uncore lock so the register sequence is atomic
 * with respect to other mmio users.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache what we programmed so the fast path above works. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; no need to disable/re-enable. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
9855
/* Turn the 845g/865g cursor off by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i845_update_cursor(plane, NULL, NULL);
}
9861
/*
 * Read back whether the 845g/865g cursor is currently enabled in hardware.
 * These chips have a single cursor, so *pipe is always reported as PIPE_A.
 * The pipe power domain is held only for the duration of the readout;
 * returns false if the domain is not powered.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	*pipe = PIPE_A;

	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9881
/*
 * Maximum cursor stride in bytes: the widest supported cursor times
 * 4 bytes per pixel (the cursor formats here are all ARGB).
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
9889
/*
 * Build the MCURSOR control value for gen2+ cursors: per-generation feature
 * bits (trickle feed, gamma, pipe CSC, pipe select) plus the mode bits for
 * the cursor width. Returns 0 for an unsupported width (warned via
 * MISSING_CASE).
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	u32 cntl = 0;

	if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

	if (INTEL_GEN(dev_priv) <= 10) {
		cntl |= MCURSOR_GAMMA_ENABLE;

		if (HAS_DDI(dev_priv))
			cntl |= MCURSOR_PIPE_CSC_ENABLE;
	}

	/* Older non-G4X platforms select the pipe in the cursor register. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);

	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= MCURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= MCURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= MCURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
		cntl |= MCURSOR_ROTATE_180;

	return cntl;
}
9931
/*
 * Check the cursor dimensions against the gen2+ hardware restrictions:
 * width must be one of the supported power-of-two sizes, and the height
 * must either match the width (square cursor) or, with CUR_FBC_CTL and no
 * rotation, fall anywhere between 8 lines and the width.
 */
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int width = plane_state->base.crtc_w;
	int height = plane_state->base.crtc_h;

	if (!intel_cursor_size_ok(plane_state))
		return false;

	/* Cursor width is limited to a few power-of-two sizes */
	switch (width) {
	case 256:
	case 128:
	case 64:
		break;
	default:
		return false;
	}

	/*
	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
	 * height from 8 lines up to the cursor width, when the
	 * cursor is not rotated. Everything else requires square
	 * cursors.
	 */
	if (HAS_CUR_FBC(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
		if (height < 8 || height > width)
			return false;
	} else {
		if (height != width)
			return false;
	}

	return true;
}
9969
/*
 * Validate cursor plane state for gen2+ platforms: run the common checks,
 * enforce the platform size and stride rules, work around a CHV pipe C
 * hardware issue, and precompute the MCURSOR control value. A NULL fb
 * (cursor being turned off) passes trivially.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The stride must exactly match the visible cursor width. */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
10024
/*
 * Program the i9xx style cursor registers for @plane.
 *
 * A NULL (or invisible) @plane_state disables the cursor:
 * cntl/base/pos/fbc_ctl are then all programmed to zero.
 * The CURBASE write arms the update, so it is always written last.
 * Register access is done under the uncore lock via the _FW accessors.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl;

		/* Non-square cursor: program the height via CUR_FBC_CTL. */
		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always start the full update
	 * with a CURCNTR write.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * CURCNTR and CUR_FBC_CTL are always
	 * armed by the CURBASE write only.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		/* Full update: control/size/base changed, rewrite everything. */
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* Cache what we wrote so the next call can take the fast path. */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		/* Fast path: only the position changes; CURBASE arms it. */
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	POSTING_READ_FW(CURBASE(pipe));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10085
/* Turn the cursor off by programming it with empty (NULL) state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i9xx_update_cursor(plane, NULL, NULL);
}
10091
10092 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10093                                      enum pipe *pipe)
10094 {
10095         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10096         enum intel_display_power_domain power_domain;
10097         bool ret;
10098         u32 val;
10099
10100         /*
10101          * Not 100% correct for planes that can move between pipes,
10102          * but that's only the case for gen2-3 which don't have any
10103          * display power wells.
10104          */
10105         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10106         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10107                 return false;
10108
10109         val = I915_READ(CURCNTR(plane->pipe));
10110
10111         ret = val & MCURSOR_MODE;
10112
10113         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10114                 *pipe = plane->pipe;
10115         else
10116                 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10117                         MCURSOR_PIPE_SELECT_SHIFT;
10118
10119         intel_display_power_put(dev_priv, power_domain);
10120
10121         return ret;
10122 }
10123
/* VESA 640x480x72Hz mode programmed on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10129
10130 struct drm_framebuffer *
10131 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10132                          struct drm_mode_fb_cmd2 *mode_cmd)
10133 {
10134         struct intel_framebuffer *intel_fb;
10135         int ret;
10136
10137         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10138         if (!intel_fb)
10139                 return ERR_PTR(-ENOMEM);
10140
10141         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10142         if (ret)
10143                 goto err;
10144
10145         return &intel_fb->base;
10146
10147 err:
10148         kfree(intel_fb);
10149         return ERR_PTR(ret);
10150 }
10151
10152 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10153                                         struct drm_crtc *crtc)
10154 {
10155         struct drm_plane *plane;
10156         struct drm_plane_state *plane_state;
10157         int ret, i;
10158
10159         ret = drm_atomic_add_affected_planes(state, crtc);
10160         if (ret)
10161                 return ret;
10162
10163         for_each_new_plane_in_state(state, plane, plane_state, i) {
10164                 if (plane_state->crtc != crtc)
10165                         continue;
10166
10167                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10168                 if (ret)
10169                         return ret;
10170
10171                 drm_atomic_set_fb_for_plane(plane_state, NULL);
10172         }
10173
10174         return 0;
10175 }
10176
/*
 * intel_get_load_detect_pipe - grab a pipe for load-based connector detection
 * @connector: connector to drive during detection
 * @mode: mode to program, or NULL to use the standard 640x480 load detect mode
 * @old: cookie that receives the state intel_release_load_detect_pipe() needs
 * @ctx: modeset acquire context
 *
 * Finds a CRTC for @connector (preferring one already attached, otherwise
 * the first unused one the encoder can drive), commits a minimal
 * configuration with all planes disabled, and records the previous state
 * in @old->restore_state so it can be undone later.
 *
 * Return: true on success, false on failure, or -EDEADLK when the acquire
 * context must be backed off and retried. Note the mixed bool/errno return:
 * callers must check for -EDEADLK explicitly.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip CRTCs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to program the detect mode, one to restore things later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Load detection wants a bare pipe: disable all planes on it. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current state so we can restore it afterwards. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must reach the caller so it can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
10334
10335 void intel_release_load_detect_pipe(struct drm_connector *connector,
10336                                     struct intel_load_detect_pipe *old,
10337                                     struct drm_modeset_acquire_ctx *ctx)
10338 {
10339         struct intel_encoder *intel_encoder =
10340                 intel_attached_encoder(connector);
10341         struct drm_encoder *encoder = &intel_encoder->base;
10342         struct drm_atomic_state *state = old->restore_state;
10343         int ret;
10344
10345         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10346                       connector->base.id, connector->name,
10347                       encoder->base.id, encoder->name);
10348
10349         if (!state)
10350                 return;
10351
10352         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
10353         if (ret)
10354                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10355         drm_atomic_state_put(state);
10356 }
10357
10358 static int i9xx_pll_refclk(struct drm_device *dev,
10359                            const struct intel_crtc_state *pipe_config)
10360 {
10361         struct drm_i915_private *dev_priv = to_i915(dev);
10362         u32 dpll = pipe_config->dpll_hw_state.dpll;
10363
10364         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10365                 return dev_priv->vbt.lvds_ssc_freq;
10366         else if (HAS_PCH_SPLIT(dev_priv))
10367                 return 120000;
10368         else if (!IS_GEN2(dev_priv))
10369                 return 96000;
10370         else
10371                 return 48000;
10372 }
10373
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP divisor register currently selected by the DPLL. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M1/N/M2 — Pineview uses its own field layout. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev_priv)) {
		/* P1 is stored as a one-hot field, hence the ffs(). */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Unknown mode: leave port_clock unset. */
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: LVDS is only considered on pipe 1 (pipe B). */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10463
10464 int intel_dotclock_calculate(int link_freq,
10465                              const struct intel_link_m_n *m_n)
10466 {
10467         /*
10468          * The calculation for the data clock is:
10469          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10470          * But we want to avoid losing precison if possible, so:
10471          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10472          *
10473          * and the link clock is simpler:
10474          * link_clock = (m * link_clock) / n
10475          */
10476
10477         if (!m_n->link_n)
10478                 return 0;
10479
10480         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
10481 }
10482
/*
 * Read out the pipe clock for a pipe driven through the PCH:
 * port_clock from the DPLL, dotclock derived from the FDI M/N values.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
10500
10501 /* Returns the currently programmed mode of the given encoder. */
10502 struct drm_display_mode *
10503 intel_encoder_current_mode(struct intel_encoder *encoder)
10504 {
10505         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10506         struct intel_crtc_state *crtc_state;
10507         struct drm_display_mode *mode;
10508         struct intel_crtc *crtc;
10509         enum pipe pipe;
10510
10511         if (!encoder->get_hw_state(encoder, &pipe))
10512                 return NULL;
10513
10514         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10515
10516         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10517         if (!mode)
10518                 return NULL;
10519
10520         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10521         if (!crtc_state) {
10522                 kfree(mode);
10523                 return NULL;
10524         }
10525
10526         crtc_state->base.crtc = &crtc->base;
10527
10528         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10529                 kfree(crtc_state);
10530                 kfree(mode);
10531                 return NULL;
10532         }
10533
10534         encoder->get_config(encoder, crtc_state);
10535
10536         intel_mode_from_pipe_config(mode, crtc_state);
10537
10538         kfree(crtc_state);
10539
10540         return mode;
10541 }
10542
/* Clean up the base drm_crtc and free the enclosing intel_crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
10550
10551 /**
10552  * intel_wm_need_update - Check whether watermarks need updating
10553  * @plane: drm plane
10554  * @state: new plane state
10555  *
10556  * Check current plane state versus the new one to determine whether
10557  * watermarks need to be recalculated.
10558  *
10559  * Returns true or false.
10560  */
10561 static bool intel_wm_need_update(struct drm_plane *plane,
10562                                  struct drm_plane_state *state)
10563 {
10564         struct intel_plane_state *new = to_intel_plane_state(state);
10565         struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10566
10567         /* Update watermarks on tiling or size changes. */
10568         if (new->base.visible != cur->base.visible)
10569                 return true;
10570
10571         if (!cur->base.fb || !new->base.fb)
10572                 return false;
10573
10574         if (cur->base.fb->modifier != new->base.fb->modifier ||
10575             cur->base.rotation != new->base.rotation ||
10576             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10577             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10578             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10579             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10580                 return true;
10581
10582         return false;
10583 }
10584
10585 static bool needs_scaling(const struct intel_plane_state *state)
10586 {
10587         int src_w = drm_rect_width(&state->base.src) >> 16;
10588         int src_h = drm_rect_height(&state->base.src) >> 16;
10589         int dst_w = drm_rect_width(&state->base.dst);
10590         int dst_h = drm_rect_height(&state->base.dst);
10591
10592         return (src_w != dst_w || src_h != dst_h);
10593 }
10594
/*
 * intel_plane_atomic_calc_changes - derive CRTC-level flags from a plane update
 * @old_crtc_state: previous CRTC state
 * @crtc_state: new CRTC state (flags are written into its intel wrapper)
 * @old_plane_state: previous plane state
 * @plane_state: new plane state
 *
 * Works out, from the old and new plane state, whether this plane update
 * requires watermark recomputation (pre/post), cxsr disabling, scaler
 * setup (gen9+), frontbuffer tracking bits, etc., and records that in
 * the new crtc state.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct drm_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->base.active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* Gen9+ planes (except the cursor) may need a pipe scaler. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	/* A plane can't have been visible on a disabled crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	/* Invisible before and after: nothing to do for this plane. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(&plane->base, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	/* Mark the frontbuffer bit for tracking/invalidation. */
	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
10699
10700 static bool encoders_cloneable(const struct intel_encoder *a,
10701                                const struct intel_encoder *b)
10702 {
10703         /* masks could be asymmetric, so check both ways */
10704         return a == b || (a->cloneable & (1 << b->type) &&
10705                           b->cloneable & (1 << a->type));
10706 }
10707
10708 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
10709                                          struct intel_crtc *crtc,
10710                                          struct intel_encoder *encoder)
10711 {
10712         struct intel_encoder *source_encoder;
10713         struct drm_connector *connector;
10714         struct drm_connector_state *connector_state;
10715         int i;
10716
10717         for_each_new_connector_in_state(state, connector, connector_state, i) {
10718                 if (connector_state->crtc != &crtc->base)
10719                         continue;
10720
10721                 source_encoder =
10722                         to_intel_encoder(connector_state->best_encoder);
10723                 if (!encoders_cloneable(encoder, source_encoder))
10724                         return false;
10725         }
10726
10727         return true;
10728 }
10729
/*
 * Per-CRTC atomic check hook (drm_crtc_helper_funcs.atomic_check).
 *
 * Validates and fills in the i915-specific parts of the new CRTC state:
 * clocks, color management, watermarks, scalers and IPS.  Returns 0 on
 * success or a negative error code to fail the atomic check phase.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* CRTC is being turned off: watermarks must be updated post-commit. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/*
	 * Full modeset on an enabled CRTC: compute new PLL state.  A shared
	 * DPLL must not already be assigned at this point.
	 */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		/* Intermediate watermarks require the optimal ones first. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* skip_intermediate_wm: reuse the optimal ILK watermarks as-is. */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	/* Gen9+: validate pipe scaler, pixel rate and plane scaler assignment. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
10814
/* CRTC helper vtable: atomic begin/flush/check hooks used by the DRM core. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
10820
/*
 * Sync each connector's atomic state (best_encoder/crtc) with the legacy
 * encoder/crtc pointers, dropping or taking connector references so that
 * the refcount held per bound CRTC stays balanced.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previously bound CRTC. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Hold a reference while bound to a CRTC. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
10845
10846 static void
10847 connected_sink_compute_bpp(struct intel_connector *connector,
10848                            struct intel_crtc_state *pipe_config)
10849 {
10850         const struct drm_display_info *info = &connector->base.display_info;
10851         int bpp = pipe_config->pipe_bpp;
10852
10853         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
10854                       connector->base.base.id,
10855                       connector->base.name);
10856
10857         /* Don't use an invalid EDID bpc value */
10858         if (info->bpc != 0 && info->bpc * 3 < bpp) {
10859                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
10860                               bpp, info->bpc * 3);
10861                 pipe_config->pipe_bpp = info->bpc * 3;
10862         }
10863
10864         /* Clamp bpp to 8 on screens without EDID 1.4 */
10865         if (info->bpc == 0 && bpp > 24) {
10866                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
10867                               bpp);
10868                 pipe_config->pipe_bpp = 24;
10869         }
10870 }
10871
10872 static int
10873 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
10874                           struct intel_crtc_state *pipe_config)
10875 {
10876         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10877         struct drm_atomic_state *state;
10878         struct drm_connector *connector;
10879         struct drm_connector_state *connector_state;
10880         int bpp, i;
10881
10882         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
10883             IS_CHERRYVIEW(dev_priv)))
10884                 bpp = 10*3;
10885         else if (INTEL_GEN(dev_priv) >= 5)
10886                 bpp = 12*3;
10887         else
10888                 bpp = 8*3;
10889
10890
10891         pipe_config->pipe_bpp = bpp;
10892
10893         state = pipe_config->base.state;
10894
10895         /* Clamp display bpp to EDID value */
10896         for_each_new_connector_in_state(state, connector, connector_state, i) {
10897                 if (connector_state->crtc != &crtc->base)
10898                         continue;
10899
10900                 connected_sink_compute_bpp(to_intel_connector(connector),
10901                                            pipe_config);
10902         }
10903
10904         return bpp;
10905 }
10906
/* Dump the crtc_* (hardware) timings of @mode to the kernel debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
			"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
10917
10918 static inline void
10919 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
10920                       unsigned int lane_count, struct intel_link_m_n *m_n)
10921 {
10922         DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
10923                       id, lane_count,
10924                       m_n->gmch_m, m_n->gmch_n,
10925                       m_n->link_m, m_n->link_n, m_n->tu);
10926 }
10927
/* Map INTEL_OUTPUT_* enum values to their names for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
10946
10947 static void snprintf_output_types(char *buf, size_t len,
10948                                   unsigned int output_types)
10949 {
10950         char *str = buf;
10951         int i;
10952
10953         str[0] = '\0';
10954
10955         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
10956                 int r;
10957
10958                 if ((output_types & BIT(i)) == 0)
10959                         continue;
10960
10961                 r = snprintf(str, len, "%s%s",
10962                              str != buf ? "," : "", output_type_str[i]);
10963                 if (r >= len)
10964                         break;
10965                 str += r;
10966                 len -= r;
10967
10968                 output_types &= ~BIT(i);
10969         }
10970
10971         WARN_ON_ONCE(output_types != 0);
10972 }
10973
/*
 * Dump the full contents of @pipe_config, plus the state of every plane
 * on @crtc, to the kernel debug log.  @context is a short tag describing
 * why the dump was requested.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	/* Decode the output_types bitmask into readable names. */
	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (pipe_config->ycbcr420)
		DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the second link config used with DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	/* Pipe scaler state only exists on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH and PCH platforms have different panel fitters. */
	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		/* Only dump planes belonging to this CRTC's pipe. */
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		/* src rectangle is in 16.16 fixed point, hence the >> 16. */
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
11080
/*
 * Verify that no digital port is driven by more than one encoder in the
 * new state, and that MST and SST/HDMI use are not mixed on the same
 * port.  Returns true when the configuration is conflict-free.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Fall back to the current state for connectors not in @state. */
		connector_state = drm_atomic_get_new_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
11144
/*
 * Zero the i915-specific extension of @crtc_state while keeping the
 * drm_crtc_state base intact, preserving the few fields (scaler state,
 * DPLL state, pfit force_thru, IPS force-disable and - on GMCH platforms -
 * watermarks) that must survive a config recomputation.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru, ips_force_disable;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Save the fields that must survive the memset below. */
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	ips_force_disable = crtc_state->ips_force_disable;
	/* Watermark state is only preserved on these platforms. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	/* Restore the preserved fields. */
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	crtc_state->ips_force_disable = ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}
11184
/*
 * Compute the full pipe configuration for @crtc from the requested mode
 * and the connectors/encoders bound to it in the atomic state.
 *
 * Runs the encoder ->compute_config() hooks and the CRTC fixup, retrying
 * exactly once with reset defaults when the CRTC is bandwidth constrained
 * (RETRY).  Returns 0 on success or a negative error code.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	/* Default to the transcoder with the same index as the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* RETRY: bandwidth constrained, rerun the encoder hooks exactly once. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
11315
/*
 * Fuzzily compare two clocks (in kHz).  Equal clocks always match; a zero
 * clock never matches a non-zero one; otherwise the clocks match when they
 * differ by less than roughly 5% of their average.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	delta = abs(clock1 - clock2);

	/* (delta + sum) * 100 / sum < 105  <=>  delta < ~5% of the average */
	return (delta + sum) * 100 / sum < 105;
}
11333
/*
 * Compare two M/N link ratios.  With @exact, only bit-for-bit equality
 * counts; otherwise the pair with the smaller N is scaled up by powers of
 * two until the Ns meet, and the scaled Ms are compared fuzzily.
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	/* A zero M or N can only match exactly, handled above. */
	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* The doubling loops below assume the values fit in a signed int. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	/* Ns must line up exactly after scaling (both are powers-of-two apart). */
	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}
11364
11365 static bool
11366 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11367                        struct intel_link_m_n *m2_n2,
11368                        bool adjust)
11369 {
11370         if (m_n->tu == m2_n2->tu &&
11371             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11372                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11373             intel_compare_m_n(m_n->link_m, m_n->link_n,
11374                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
11375                 if (adjust)
11376                         *m2_n2 = *m_n;
11377
11378                 return true;
11379         }
11380
11381         return false;
11382 }
11383
/*
 * Report a pipe config mismatch in field @name.  During fastset
 * adjustment (@adjust) mismatches are expected and logged at KMS debug
 * level only; during state verification they are real errors.
 */
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (adjust)
		drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
	else
		drm_err("mismatch in %s %pV", name, &vaf);

	va_end(args);
}
11401
11402 static bool
11403 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11404                           struct intel_crtc_state *current_config,
11405                           struct intel_crtc_state *pipe_config,
11406                           bool adjust)
11407 {
11408         bool ret = true;
11409         bool fixup_inherited = adjust &&
11410                 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
11411                 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
11412
11413 #define PIPE_CONF_CHECK_X(name) do { \
11414         if (current_config->name != pipe_config->name) { \
11415                 pipe_config_err(adjust, __stringify(name), \
11416                           "(expected 0x%08x, found 0x%08x)\n", \
11417                           current_config->name, \
11418                           pipe_config->name); \
11419                 ret = false; \
11420         } \
11421 } while (0)
11422
11423 #define PIPE_CONF_CHECK_I(name) do { \
11424         if (current_config->name != pipe_config->name) { \
11425                 pipe_config_err(adjust, __stringify(name), \
11426                           "(expected %i, found %i)\n", \
11427                           current_config->name, \
11428                           pipe_config->name); \
11429                 ret = false; \
11430         } \
11431 } while (0)
11432
11433 #define PIPE_CONF_CHECK_BOOL(name) do { \
11434         if (current_config->name != pipe_config->name) { \
11435                 pipe_config_err(adjust, __stringify(name), \
11436                           "(expected %s, found %s)\n", \
11437                           yesno(current_config->name), \
11438                           yesno(pipe_config->name)); \
11439                 ret = false; \
11440         } \
11441 } while (0)
11442
11443 /*
11444  * Checks state where we only read out the enabling, but not the entire
11445  * state itself (like full infoframes or ELD for audio). These states
11446  * require a full modeset on bootup to fix up.
11447  */
11448 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
11449         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
11450                 PIPE_CONF_CHECK_BOOL(name); \
11451         } else { \
11452                 pipe_config_err(adjust, __stringify(name), \
11453                           "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
11454                           yesno(current_config->name), \
11455                           yesno(pipe_config->name)); \
11456                 ret = false; \
11457         } \
11458 } while (0)
11459
11460 #define PIPE_CONF_CHECK_P(name) do { \
11461         if (current_config->name != pipe_config->name) { \
11462                 pipe_config_err(adjust, __stringify(name), \
11463                           "(expected %p, found %p)\n", \
11464                           current_config->name, \
11465                           pipe_config->name); \
11466                 ret = false; \
11467         } \
11468 } while (0)
11469
11470 #define PIPE_CONF_CHECK_M_N(name) do { \
11471         if (!intel_compare_link_m_n(&current_config->name, \
11472                                     &pipe_config->name,\
11473                                     adjust)) { \
11474                 pipe_config_err(adjust, __stringify(name), \
11475                           "(expected tu %i gmch %i/%i link %i/%i, " \
11476                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11477                           current_config->name.tu, \
11478                           current_config->name.gmch_m, \
11479                           current_config->name.gmch_n, \
11480                           current_config->name.link_m, \
11481                           current_config->name.link_n, \
11482                           pipe_config->name.tu, \
11483                           pipe_config->name.gmch_m, \
11484                           pipe_config->name.gmch_n, \
11485                           pipe_config->name.link_m, \
11486                           pipe_config->name.link_n); \
11487                 ret = false; \
11488         } \
11489 } while (0)
11490
11491 /* This is required for BDW+ where there is only one set of registers for
11492  * switching between high and low RR.
11493  * This macro can be used whenever a comparison has to be made between one
11494  * hw state and multiple sw state variables.
11495  */
11496 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
11497         if (!intel_compare_link_m_n(&current_config->name, \
11498                                     &pipe_config->name, adjust) && \
11499             !intel_compare_link_m_n(&current_config->alt_name, \
11500                                     &pipe_config->name, adjust)) { \
11501                 pipe_config_err(adjust, __stringify(name), \
11502                           "(expected tu %i gmch %i/%i link %i/%i, " \
11503                           "or tu %i gmch %i/%i link %i/%i, " \
11504                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11505                           current_config->name.tu, \
11506                           current_config->name.gmch_m, \
11507                           current_config->name.gmch_n, \
11508                           current_config->name.link_m, \
11509                           current_config->name.link_n, \
11510                           current_config->alt_name.tu, \
11511                           current_config->alt_name.gmch_m, \
11512                           current_config->alt_name.gmch_n, \
11513                           current_config->alt_name.link_m, \
11514                           current_config->alt_name.link_n, \
11515                           pipe_config->name.tu, \
11516                           pipe_config->name.gmch_m, \
11517                           pipe_config->name.gmch_n, \
11518                           pipe_config->name.link_m, \
11519                           pipe_config->name.link_n); \
11520                 ret = false; \
11521         } \
11522 } while (0)
11523
11524 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
11525         if ((current_config->name ^ pipe_config->name) & (mask)) { \
11526                 pipe_config_err(adjust, __stringify(name), \
11527                           "(%x) (expected %i, found %i)\n", \
11528                           (mask), \
11529                           current_config->name & (mask), \
11530                           pipe_config->name & (mask)); \
11531                 ret = false; \
11532         } \
11533 } while (0)
11534
11535 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
11536         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
11537                 pipe_config_err(adjust, __stringify(name), \
11538                           "(expected %i, found %i)\n", \
11539                           current_config->name, \
11540                           pipe_config->name); \
11541                 ret = false; \
11542         } \
11543 } while (0)
11544
11545 #define PIPE_CONF_QUIRK(quirk)  \
11546         ((current_config->quirks | pipe_config->quirks) & (quirk))
11547
11548         PIPE_CONF_CHECK_I(cpu_transcoder);
11549
11550         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
11551         PIPE_CONF_CHECK_I(fdi_lanes);
11552         PIPE_CONF_CHECK_M_N(fdi_m_n);
11553
11554         PIPE_CONF_CHECK_I(lane_count);
11555         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
11556
11557         if (INTEL_GEN(dev_priv) < 8) {
11558                 PIPE_CONF_CHECK_M_N(dp_m_n);
11559
11560                 if (current_config->has_drrs)
11561                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
11562         } else
11563                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
11564
11565         PIPE_CONF_CHECK_X(output_types);
11566
11567         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
11568         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
11569         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
11570         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
11571         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
11572         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
11573
11574         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
11575         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
11576         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
11577         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
11578         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
11579         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
11580
11581         PIPE_CONF_CHECK_I(pixel_multiplier);
11582         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
11583         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
11584             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11585                 PIPE_CONF_CHECK_BOOL(limited_color_range);
11586
11587         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
11588         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
11589         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
11590         PIPE_CONF_CHECK_BOOL(ycbcr420);
11591
11592         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
11593
11594         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11595                               DRM_MODE_FLAG_INTERLACE);
11596
11597         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
11598                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11599                                       DRM_MODE_FLAG_PHSYNC);
11600                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11601                                       DRM_MODE_FLAG_NHSYNC);
11602                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11603                                       DRM_MODE_FLAG_PVSYNC);
11604                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11605                                       DRM_MODE_FLAG_NVSYNC);
11606         }
11607
11608         PIPE_CONF_CHECK_X(gmch_pfit.control);
11609         /* pfit ratios are autocomputed by the hw on gen4+ */
11610         if (INTEL_GEN(dev_priv) < 4)
11611                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
11612         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
11613
11614         if (!adjust) {
11615                 PIPE_CONF_CHECK_I(pipe_src_w);
11616                 PIPE_CONF_CHECK_I(pipe_src_h);
11617
11618                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
11619                 if (current_config->pch_pfit.enabled) {
11620                         PIPE_CONF_CHECK_X(pch_pfit.pos);
11621                         PIPE_CONF_CHECK_X(pch_pfit.size);
11622                 }
11623
11624                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
11625                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
11626         }
11627
11628         PIPE_CONF_CHECK_BOOL(double_wide);
11629
11630         PIPE_CONF_CHECK_P(shared_dpll);
11631         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
11632         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
11633         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
11634         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
11635         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
11636         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
11637         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
11638         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
11639         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
11640         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
11641         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
11642         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
11643         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
11644         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
11645         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
11646         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
11647         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
11648         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
11649         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
11650         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
11651         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
11652         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
11653         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
11654         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
11655         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
11656         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
11657         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
11658         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
11659         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
11660         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
11661         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
11662
11663         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
11664         PIPE_CONF_CHECK_X(dsi_pll.div);
11665
11666         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
11667                 PIPE_CONF_CHECK_I(pipe_bpp);
11668
11669         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
11670         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
11671
11672         PIPE_CONF_CHECK_I(min_voltage_level);
11673
11674 #undef PIPE_CONF_CHECK_X
11675 #undef PIPE_CONF_CHECK_I
11676 #undef PIPE_CONF_CHECK_BOOL
11677 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
11678 #undef PIPE_CONF_CHECK_P
11679 #undef PIPE_CONF_CHECK_FLAGS
11680 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
11681 #undef PIPE_CONF_QUIRK
11682
11683         return ret;
11684 }
11685
11686 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11687                                            const struct intel_crtc_state *pipe_config)
11688 {
11689         if (pipe_config->has_pch_encoder) {
11690                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11691                                                             &pipe_config->fdi_m_n);
11692                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11693
11694                 /*
11695                  * FDI already provided one idea for the dotclock.
11696                  * Yell if the encoder disagrees.
11697                  */
11698                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11699                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11700                      fdi_dotclock, dotclock);
11701         }
11702 }
11703
11704 static void verify_wm_state(struct drm_crtc *crtc,
11705                             struct drm_crtc_state *new_state)
11706 {
11707         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11708         struct skl_ddb_allocation hw_ddb, *sw_ddb;
11709         struct skl_pipe_wm hw_wm, *sw_wm;
11710         struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
11711         struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
11712         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11713         const enum pipe pipe = intel_crtc->pipe;
11714         int plane, level, max_level = ilk_wm_max_level(dev_priv);
11715
11716         if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
11717                 return;
11718
11719         skl_pipe_wm_get_hw_state(crtc, &hw_wm);
11720         sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
11721
11722         skl_ddb_get_hw_state(dev_priv, &hw_ddb);
11723         sw_ddb = &dev_priv->wm.skl_hw.ddb;
11724
11725         if (INTEL_GEN(dev_priv) >= 11)
11726                 if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
11727                         DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
11728                                   sw_ddb->enabled_slices,
11729                                   hw_ddb.enabled_slices);
11730         /* planes */
11731         for_each_universal_plane(dev_priv, pipe, plane) {
11732                 hw_plane_wm = &hw_wm.planes[plane];
11733                 sw_plane_wm = &sw_wm->planes[plane];
11734
11735                 /* Watermarks */
11736                 for (level = 0; level <= max_level; level++) {
11737                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
11738                                                 &sw_plane_wm->wm[level]))
11739                                 continue;
11740
11741                         DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11742                                   pipe_name(pipe), plane + 1, level,
11743                                   sw_plane_wm->wm[level].plane_en,
11744                                   sw_plane_wm->wm[level].plane_res_b,
11745                                   sw_plane_wm->wm[level].plane_res_l,
11746                                   hw_plane_wm->wm[level].plane_en,
11747                                   hw_plane_wm->wm[level].plane_res_b,
11748                                   hw_plane_wm->wm[level].plane_res_l);
11749                 }
11750
11751                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
11752                                          &sw_plane_wm->trans_wm)) {
11753                         DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11754                                   pipe_name(pipe), plane + 1,
11755                                   sw_plane_wm->trans_wm.plane_en,
11756                                   sw_plane_wm->trans_wm.plane_res_b,
11757                                   sw_plane_wm->trans_wm.plane_res_l,
11758                                   hw_plane_wm->trans_wm.plane_en,
11759                                   hw_plane_wm->trans_wm.plane_res_b,
11760                                   hw_plane_wm->trans_wm.plane_res_l);
11761                 }
11762
11763                 /* DDB */
11764                 hw_ddb_entry = &hw_ddb.plane[pipe][plane];
11765                 sw_ddb_entry = &sw_ddb->plane[pipe][plane];
11766
11767                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
11768                         DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
11769                                   pipe_name(pipe), plane + 1,
11770                                   sw_ddb_entry->start, sw_ddb_entry->end,
11771                                   hw_ddb_entry->start, hw_ddb_entry->end);
11772                 }
11773         }
11774
11775         /*
11776          * cursor
11777          * If the cursor plane isn't active, we may not have updated it's ddb
11778          * allocation. In that case since the ddb allocation will be updated
11779          * once the plane becomes visible, we can skip this check
11780          */
11781         if (1) {
11782                 hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
11783                 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
11784
11785                 /* Watermarks */
11786                 for (level = 0; level <= max_level; level++) {
11787                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
11788                                                 &sw_plane_wm->wm[level]))
11789                                 continue;
11790
11791                         DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11792                                   pipe_name(pipe), level,
11793                                   sw_plane_wm->wm[level].plane_en,
11794                                   sw_plane_wm->wm[level].plane_res_b,
11795                                   sw_plane_wm->wm[level].plane_res_l,
11796                                   hw_plane_wm->wm[level].plane_en,
11797                                   hw_plane_wm->wm[level].plane_res_b,
11798                                   hw_plane_wm->wm[level].plane_res_l);
11799                 }
11800
11801                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
11802                                          &sw_plane_wm->trans_wm)) {
11803                         DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11804                                   pipe_name(pipe),
11805                                   sw_plane_wm->trans_wm.plane_en,
11806                                   sw_plane_wm->trans_wm.plane_res_b,
11807                                   sw_plane_wm->trans_wm.plane_res_l,
11808                                   hw_plane_wm->trans_wm.plane_en,
11809                                   hw_plane_wm->trans_wm.plane_res_b,
11810                                   hw_plane_wm->trans_wm.plane_res_l);
11811                 }
11812
11813                 /* DDB */
11814                 hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
11815                 sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
11816
11817                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
11818                         DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
11819                                   pipe_name(pipe),
11820                                   sw_ddb_entry->start, sw_ddb_entry->end,
11821                                   hw_ddb_entry->start, hw_ddb_entry->end);
11822                 }
11823         }
11824 }
11825
11826 static void
11827 verify_connector_state(struct drm_device *dev,
11828                        struct drm_atomic_state *state,
11829                        struct drm_crtc *crtc)
11830 {
11831         struct drm_connector *connector;
11832         struct drm_connector_state *new_conn_state;
11833         int i;
11834
11835         for_each_new_connector_in_state(state, connector, new_conn_state, i) {
11836                 struct drm_encoder *encoder = connector->encoder;
11837                 struct drm_crtc_state *crtc_state = NULL;
11838
11839                 if (new_conn_state->crtc != crtc)
11840                         continue;
11841
11842                 if (crtc)
11843                         crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
11844
11845                 intel_connector_verify_state(crtc_state, new_conn_state);
11846
11847                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
11848                      "connector's atomic encoder doesn't match legacy encoder\n");
11849         }
11850 }
11851
11852 static void
11853 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
11854 {
11855         struct intel_encoder *encoder;
11856         struct drm_connector *connector;
11857         struct drm_connector_state *old_conn_state, *new_conn_state;
11858         int i;
11859
11860         for_each_intel_encoder(dev, encoder) {
11861                 bool enabled = false, found = false;
11862                 enum pipe pipe;
11863
11864                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
11865                               encoder->base.base.id,
11866                               encoder->base.name);
11867
11868                 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
11869                                                    new_conn_state, i) {
11870                         if (old_conn_state->best_encoder == &encoder->base)
11871                                 found = true;
11872
11873                         if (new_conn_state->best_encoder != &encoder->base)
11874                                 continue;
11875                         found = enabled = true;
11876
11877                         I915_STATE_WARN(new_conn_state->crtc !=
11878                                         encoder->base.crtc,
11879                              "connector's crtc doesn't match encoder crtc\n");
11880                 }
11881
11882                 if (!found)
11883                         continue;
11884
11885                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
11886                      "encoder's enabled state mismatch "
11887                      "(expected %i, found %i)\n",
11888                      !!encoder->base.crtc, enabled);
11889
11890                 if (!encoder->base.crtc) {
11891                         bool active;
11892
11893                         active = encoder->get_hw_state(encoder, &pipe);
11894                         I915_STATE_WARN(active,
11895                              "encoder detached but still enabled on pipe %c.\n",
11896                              pipe_name(pipe));
11897                 }
11898         }
11899 }
11900
/*
 * Read the pipe's hardware state back and compare it against the committed
 * software state for @crtc. Mismatches are reported via I915_STATE_WARN /
 * intel_dump_pipe_config but not corrected.
 *
 * NOTE: @old_crtc_state is destroyed and its storage repurposed as a
 * scratch intel_crtc_state to hold the hardware readout — callers must not
 * use it afterwards.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/*
	 * Free the old state's internals, then reuse its memory as a zeroed
	 * scratch pipe_config for the hw readout; preserve the backpointers.
	 */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	/* Each encoder on the crtc must agree on the active state and pipe. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's hw state into the readout config. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Full sw/hw comparison only makes sense for an active crtc. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
11970
11971 static void
11972 intel_verify_planes(struct intel_atomic_state *state)
11973 {
11974         struct intel_plane *plane;
11975         const struct intel_plane_state *plane_state;
11976         int i;
11977
11978         for_each_new_intel_plane_in_state(state, plane,
11979                                           plane_state, i)
11980                 assert_plane(plane, plane_state->base.visible);
11981 }
11982
/*
 * Verify one shared DPLL's software tracking (on/off, active_mask,
 * crtc_mask, cached hw state) against its real hardware state.
 * With @crtc == NULL only the global pll bookkeeping is checked;
 * otherwise @new_state is the crtc's new state and the crtc's membership
 * in the pll's masks is verified too.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs can't be cross-checked against sw on/off tracking. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	/* No crtc: just check active users don't exceed the references. */
	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(crtc);

	/* An active crtc must be in active_mask, an inactive one must not. */
	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	/* Either way, the crtc must hold a reference on the pll. */
	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* Cached sw hw-state must match what the hardware reported. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
12037
12038 static void
12039 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12040                          struct drm_crtc_state *old_crtc_state,
12041                          struct drm_crtc_state *new_crtc_state)
12042 {
12043         struct drm_i915_private *dev_priv = to_i915(dev);
12044         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12045         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12046
12047         if (new_state->shared_dpll)
12048                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12049
12050         if (old_state->shared_dpll &&
12051             old_state->shared_dpll != new_state->shared_dpll) {
12052                 unsigned int crtc_mask = drm_crtc_mask(crtc);
12053                 struct intel_shared_dpll *pll = old_state->shared_dpll;
12054
12055                 I915_STATE_WARN(pll->active_mask & crtc_mask,
12056                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12057                                 pipe_name(drm_crtc_index(crtc)));
12058                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12059                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12060                                 pipe_name(drm_crtc_index(crtc)));
12061         }
12062 }
12063
12064 static void
12065 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12066                           struct drm_atomic_state *state,
12067                           struct drm_crtc_state *old_state,
12068                           struct drm_crtc_state *new_state)
12069 {
12070         if (!needs_modeset(new_state) &&
12071             !to_intel_crtc_state(new_state)->update_pipe)
12072                 return;
12073
12074         verify_wm_state(crtc, new_state);
12075         verify_connector_state(crtc->dev, state, crtc);
12076         verify_crtc_state(crtc, old_state, new_state);
12077         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12078 }
12079
12080 static void
12081 verify_disabled_dpll_state(struct drm_device *dev)
12082 {
12083         struct drm_i915_private *dev_priv = to_i915(dev);
12084         int i;
12085
12086         for (i = 0; i < dev_priv->num_shared_dpll; i++)
12087                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12088 }
12089
/*
 * State checks that are not tied to any specific crtc: encoder links,
 * connectors with no crtc, and the bookkeeping of all shared DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}
12098
12099 static void update_scanline_offset(struct intel_crtc *crtc)
12100 {
12101         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12102
12103         /*
12104          * The scanline counter increments at the leading edge of hsync.
12105          *
12106          * On most platforms it starts counting from vtotal-1 on the
12107          * first active line. That means the scanline counter value is
12108          * always one less than what we would expect. Ie. just after
12109          * start of vblank, which also occurs at start of hsync (on the
12110          * last active line), the scanline counter will read vblank_start-1.
12111          *
12112          * On gen2 the scanline counter starts counting from 1 instead
12113          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12114          * to keep the value positive), instead of adding one.
12115          *
12116          * On HSW+ the behaviour of the scanline counter depends on the output
12117          * type. For DP ports it behaves like most other platforms, but on HDMI
12118          * there's an extra 1 line difference. So we need to add two instead of
12119          * one to the value.
12120          *
12121          * On VLV/CHV DSI the scanline counter would appear to increment
12122          * approx. 1/3 of a scanline before start of vblank. Unfortunately
12123          * that means we can't tell whether we're in vblank or not while
12124          * we're on that particular line. We must still set scanline_offset
12125          * to 1 so that the vblank timestamps come out correct when we query
12126          * the scanline counter from within the vblank interrupt handler.
12127          * However if queried just before the start of vblank we'll get an
12128          * answer that's slightly in the future.
12129          */
12130         if (IS_GEN2(dev_priv)) {
12131                 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12132                 int vtotal;
12133
12134                 vtotal = adjusted_mode->crtc_vtotal;
12135                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12136                         vtotal /= 2;
12137
12138                 crtc->scanline_offset = vtotal - 1;
12139         } else if (HAS_DDI(dev_priv) &&
12140                    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
12141                 crtc->scanline_offset = 2;
12142         } else
12143                 crtc->scanline_offset = 1;
12144 }
12145
12146 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12147 {
12148         struct drm_device *dev = state->dev;
12149         struct drm_i915_private *dev_priv = to_i915(dev);
12150         struct drm_crtc *crtc;
12151         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12152         int i;
12153
12154         if (!dev_priv->display.crtc_compute_clock)
12155                 return;
12156
12157         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12158                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12159                 struct intel_shared_dpll *old_dpll =
12160                         to_intel_crtc_state(old_crtc_state)->shared_dpll;
12161
12162                 if (!needs_modeset(new_crtc_state))
12163                         continue;
12164
12165                 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12166
12167                 if (!old_dpll)
12168                         continue;
12169
12170                 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12171         }
12172 }
12173
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* Second enabled crtc found; remember it and stop. */
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* Pulls every crtc's state (and lock) into this transaction. */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		/* Only crtcs staying enabled unchanged count as "already on". */
		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		/* One pipe already on: the newly enabled one waits on it. */
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		/* Going from zero pipes to two: second waits on the first. */
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
12238
12239 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12240 {
12241         struct drm_crtc *crtc;
12242
12243         /* Add all pipes to the state */
12244         for_each_crtc(state->dev, crtc) {
12245                 struct drm_crtc_state *crtc_state;
12246
12247                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12248                 if (IS_ERR(crtc_state))
12249                         return PTR_ERR(crtc_state);
12250         }
12251
12252         return 0;
12253 }
12254
12255 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12256 {
12257         struct drm_crtc *crtc;
12258
12259         /*
12260          * Add all pipes to the state, and force
12261          * a modeset on all the active ones.
12262          */
12263         for_each_crtc(state->dev, crtc) {
12264                 struct drm_crtc_state *crtc_state;
12265                 int ret;
12266
12267                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12268                 if (IS_ERR(crtc_state))
12269                         return PTR_ERR(crtc_state);
12270
12271                 if (!crtc_state->active || needs_modeset(crtc_state))
12272                         continue;
12273
12274                 crtc_state->mode_changed = true;
12275
12276                 ret = drm_atomic_add_affected_connectors(state, crtc);
12277                 if (ret)
12278                         return ret;
12279
12280                 ret = drm_atomic_add_affected_planes(state, crtc);
12281                 if (ret)
12282                         return ret;
12283         }
12284
12285         return 0;
12286 }
12287
12288 static int intel_modeset_checks(struct drm_atomic_state *state)
12289 {
12290         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12291         struct drm_i915_private *dev_priv = to_i915(state->dev);
12292         struct drm_crtc *crtc;
12293         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12294         int ret = 0, i;
12295
12296         if (!check_digital_port_conflicts(state)) {
12297                 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
12298                 return -EINVAL;
12299         }
12300
12301         intel_state->modeset = true;
12302         intel_state->active_crtcs = dev_priv->active_crtcs;
12303         intel_state->cdclk.logical = dev_priv->cdclk.logical;
12304         intel_state->cdclk.actual = dev_priv->cdclk.actual;
12305
12306         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12307                 if (new_crtc_state->active)
12308                         intel_state->active_crtcs |= 1 << i;
12309                 else
12310                         intel_state->active_crtcs &= ~(1 << i);
12311
12312                 if (old_crtc_state->active != new_crtc_state->active)
12313                         intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
12314         }
12315
12316         /*
12317          * See if the config requires any additional preparation, e.g.
12318          * to adjust global state with pipes off.  We need to do this
12319          * here so we can get the modeset_pipe updated config for the new
12320          * mode set on this crtc.  For other crtcs we need to use the
12321          * adjusted_mode bits in the crtc directly.
12322          */
12323         if (dev_priv->display.modeset_calc_cdclk) {
12324                 ret = dev_priv->display.modeset_calc_cdclk(state);
12325                 if (ret < 0)
12326                         return ret;
12327
12328                 /*
12329                  * Writes to dev_priv->cdclk.logical must protected by
12330                  * holding all the crtc locks, even if we don't end up
12331                  * touching the hardware
12332                  */
12333                 if (intel_cdclk_changed(&dev_priv->cdclk.logical,
12334                                         &intel_state->cdclk.logical)) {
12335                         ret = intel_lock_all_pipes(state);
12336                         if (ret < 0)
12337                                 return ret;
12338                 }
12339
12340                 /* All pipes must be switched off while we change the cdclk. */
12341                 if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
12342                                               &intel_state->cdclk.actual)) {
12343                         ret = intel_modeset_all_pipes(state);
12344                         if (ret < 0)
12345                                 return ret;
12346                 }
12347
12348                 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
12349                               intel_state->cdclk.logical.cdclk,
12350                               intel_state->cdclk.actual.cdclk);
12351                 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
12352                               intel_state->cdclk.logical.voltage_level,
12353                               intel_state->cdclk.actual.voltage_level);
12354         } else {
12355                 to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
12356         }
12357
12358         intel_modeset_clear_plls(state);
12359
12360         if (IS_HASWELL(dev_priv))
12361                 return haswell_mode_set_planes_workaround(state);
12362
12363         return 0;
12364 }
12365
12366 /*
12367  * Handle calculation of various watermark data at the end of the atomic check
12368  * phase.  The code here should be run after the per-crtc and per-plane 'check'
12369  * handlers to ensure that all derived state has been updated.
12370  */
12371 static int calc_watermark_data(struct drm_atomic_state *state)
12372 {
12373         struct drm_device *dev = state->dev;
12374         struct drm_i915_private *dev_priv = to_i915(dev);
12375
12376         /* Is there platform-specific watermark information to calculate? */
12377         if (dev_priv->display.compute_global_watermarks)
12378                 return dev_priv->display.compute_global_watermarks(state);
12379
12380         return 0;
12381 }
12382
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * i915 implementation of the atomic_check hook: computes the pipe
 * configuration for every crtc undergoing a modeset, downgrades
 * modesets to fastsets where the fastboot comparison permits, then
 * runs the global modeset/cdclk checks, plane checks, FBC selection
 * and watermark derivation.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check(struct drm_device *dev,
                              struct drm_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *crtc_state;
        int ret, i;
        bool any_ms = false;

        /* Catch I915_MODE_FLAG_INHERITED: a private-flag change (e.g. the
         * inherited-from-BIOS flag being cleared) must force a modeset. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      crtc_state, i) {
                if (crtc_state->mode.private_flags !=
                    old_crtc_state->mode.private_flags)
                        crtc_state->mode_changed = true;
        }

        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                return ret;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
                struct intel_crtc_state *pipe_config =
                        to_intel_crtc_state(crtc_state);

                if (!needs_modeset(crtc_state))
                        continue;

                /* A crtc being disabled still counts as a modeset. */
                if (!crtc_state->enable) {
                        any_ms = true;
                        continue;
                }

                ret = intel_modeset_pipe_config(crtc, pipe_config);
                if (ret) {
                        intel_dump_pipe_config(to_intel_crtc(crtc),
                                               pipe_config, "[failed]");
                        return ret;
                }

                /* With fastboot, downgrade to a fastset if the computed
                 * config matches the current one closely enough. */
                if (i915_modparams.fastboot &&
                    intel_pipe_config_compare(dev_priv,
                                        to_intel_crtc_state(old_crtc_state),
                                        pipe_config, true)) {
                        crtc_state->mode_changed = false;
                        pipe_config->update_pipe = true;
                }

                /* Re-check: the fastboot path may have cleared mode_changed. */
                if (needs_modeset(crtc_state))
                        any_ms = true;

                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
                                       needs_modeset(crtc_state) ?
                                       "[modeset]" : "[fastset]");
        }

        if (any_ms) {
                ret = intel_modeset_checks(state);

                if (ret)
                        return ret;
        } else {
                /* No modeset: carry the current logical cdclk forward. */
                intel_state->cdclk.logical = dev_priv->cdclk.logical;
        }

        ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                return ret;

        intel_fbc_choose_crtc(dev_priv, intel_state);
        return calc_watermark_data(state);
}
12461
/*
 * Prepare (pin etc.) all framebuffers in @state before the commit
 * starts touching hardware. Thin wrapper around the DRM helper.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
                                       struct drm_atomic_state *state)
{
        return drm_atomic_helper_prepare_planes(dev, state);
}
12467
12468 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12469 {
12470         struct drm_device *dev = crtc->base.dev;
12471
12472         if (!dev->max_vblank_count)
12473                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
12474
12475         return dev->driver->get_vblank_counter(dev, crtc->pipe);
12476 }
12477
/*
 * Commit one crtc's new state to the hardware: enable the pipe on a
 * full modeset (or run the pre-plane update on a fastset), update FBC
 * when the primary plane is part of the commit, then flush all plane
 * updates for this crtc.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
                              struct drm_atomic_state *state,
                              struct drm_crtc_state *old_crtc_state,
                              struct drm_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
        bool modeset = needs_modeset(new_crtc_state);
        /* New state of the primary plane, or NULL if it's not in @state. */
        struct intel_plane_state *new_plane_state =
                intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
                                                 to_intel_plane(crtc->primary));

        if (modeset) {
                /* Scanline offset must be valid before the pipe runs. */
                update_scanline_offset(intel_crtc);
                dev_priv->display.crtc_enable(pipe_config, state);

                /* vblanks work again, re-enable pipe CRC. */
                intel_crtc_enable_pipe_crc(intel_crtc);
        } else {
                /* Fastset: watermark/underrun prep without a full enable. */
                intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
                                       pipe_config);
        }

        if (new_plane_state)
                intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

        drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
}
12508
12509 static void intel_update_crtcs(struct drm_atomic_state *state)
12510 {
12511         struct drm_crtc *crtc;
12512         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12513         int i;
12514
12515         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12516                 if (!new_crtc_state->active)
12517                         continue;
12518
12519                 intel_update_crtc(crtc, state, old_crtc_state,
12520                                   new_crtc_state);
12521         }
12522 }
12523
/*
 * Gen9+ variant of update_crtcs: pipes' DDB (data buffer) allocations
 * must never overlap at any instant during the transition, so crtcs are
 * committed in an order where each pipe's new allocation is disjoint
 * from every allocation still in effect, inserting vblank waits where
 * a pipe's allocation change must land before others can proceed.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_crtc_state *cstate;
        unsigned int updated = 0;
        bool progress;
        enum pipe pipe;
        int i;
        u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        u8 required_slices = intel_state->wm_results.ddb.enabled_slices;

        /* Per-pipe: DDB allocation currently in effect on the hardware. */
        const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
                /* ignore allocations for crtc's that have been turned off. */
                if (new_crtc_state->active)
                        entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

        /* If 2nd DBuf slice required, enable it here */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other between CRTC updates. Otherwise we'll
         * cause pipe underruns and other bad stuff.
         */
        do {
                progress = false;

                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        bool vbl_wait = false;
                        unsigned int cmask = drm_crtc_mask(crtc);

                        intel_crtc = to_intel_crtc(crtc);
                        cstate = to_intel_crtc_state(new_crtc_state);
                        pipe = intel_crtc->pipe;

                        /* Skip pipes already done or not active. */
                        if (updated & cmask || !cstate->base.active)
                                continue;

                        /* Defer this pipe while its new DDB allocation
                         * still overlaps one currently in effect. */
                        if (skl_ddb_allocation_overlaps(dev_priv,
                                                        entries,
                                                        &cstate->wm.skl.ddb,
                                                        i))
                                continue;

                        updated |= cmask;
                        entries[i] = &cstate->wm.skl.ddb;

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
                                                 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
                            !new_crtc_state->active_changed &&
                            intel_state->wm_results.dirty_pipes != updated)
                                vbl_wait = true;

                        intel_update_crtc(crtc, state, old_crtc_state,
                                          new_crtc_state);

                        if (vbl_wait)
                                intel_wait_for_vblank(dev_priv, pipe);

                        progress = true;
                }
        } while (progress);

        /* If 2nd DBuf slice is no more required disable it */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);
}
12605
12606 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12607 {
12608         struct intel_atomic_state *state, *next;
12609         struct llist_node *freed;
12610
12611         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12612         llist_for_each_entry_safe(state, next, freed, freed)
12613                 drm_atomic_state_put(&state->base);
12614 }
12615
12616 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12617 {
12618         struct drm_i915_private *dev_priv =
12619                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12620
12621         intel_atomic_helper_free_state(dev_priv);
12622 }
12623
/*
 * Block until the commit's i915_sw_fence signals, while also waking on a
 * GPU reset that requires a modeset (I915_RESET_MODESET), so a stuck
 * fence cannot deadlock the reset path. Waits on both queues at once.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                /* Queue on both waitqueues; either event ends the wait. */
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(&dev_priv->gpu_error.wait_queue,
                                &wait_reset, TASK_UNINTERRUPTIBLE);

                /* Check conditions only after queueing to avoid lost wakeups. */
                if (i915_sw_fence_done(&intel_state->commit_ready)
                    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
12647
12648 static void intel_atomic_cleanup_work(struct work_struct *work)
12649 {
12650         struct drm_atomic_state *state =
12651                 container_of(work, struct drm_atomic_state, commit_work);
12652         struct drm_i915_private *i915 = to_i915(state->dev);
12653
12654         drm_atomic_helper_cleanup_planes(&i915->drm, state);
12655         drm_atomic_helper_commit_cleanup_done(state);
12656         drm_atomic_state_put(state);
12657
12658         intel_atomic_helper_free_state(i915);
12659 }
12660
/*
 * Hardware half of the atomic commit: runs after all fences signal
 * (possibly from a worker for nonblocking commits). Disables outgoing
 * pipes, applies global state (cdclk, SAGV), enables/updates pipes in
 * the platform-specific order, then programs optimal watermarks and
 * defers final cleanup to a worker. Statement order here mirrors the
 * hardware modeset sequence and must not be rearranged casually.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_crtc *crtc;
        struct intel_crtc_state *intel_cstate;
        /* Power-domain references taken per pipe, released after update. */
        u64 put_domains[I915_MAX_PIPES] = {};
        int i;

        intel_atomic_commit_fence_wait(intel_state);

        drm_atomic_helper_wait_for_dependencies(state);

        if (intel_state->modeset)
                intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

        /* Phase 1: disable every pipe that needs a full modeset. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                if (needs_modeset(new_crtc_state) ||
                    to_intel_crtc_state(new_crtc_state)->update_pipe) {

                        put_domains[to_intel_crtc(crtc)->pipe] =
                                modeset_get_crtc_power_domains(crtc,
                                        to_intel_crtc_state(new_crtc_state));
                }

                if (!needs_modeset(new_crtc_state))
                        continue;

                intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
                                       to_intel_crtc_state(new_crtc_state));

                if (old_crtc_state->active) {
                        intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);

                        /*
                         * We need to disable pipe CRC before disabling the pipe,
                         * or we race against vblank off.
                         */
                        intel_crtc_disable_pipe_crc(intel_crtc);

                        dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
                        intel_crtc->active = false;
                        intel_fbc_disable(intel_crtc);
                        intel_disable_shared_dpll(intel_crtc);

                        /*
                         * Underruns don't always raise
                         * interrupts, so check manually.
                         */
                        intel_check_cpu_fifo_underruns(dev_priv);
                        intel_check_pch_fifo_underruns(dev_priv);

                        if (!new_crtc_state->active) {
                                /*
                                 * Make sure we don't call initial_watermarks
                                 * for ILK-style watermark updates.
                                 *
                                 * No clue what this is supposed to achieve.
                                 */
                                if (INTEL_GEN(dev_priv) >= 9)
                                        dev_priv->display.initial_watermarks(intel_state,
                                                                             to_intel_crtc_state(new_crtc_state));
                        }
                }
        }

        /* FIXME: Eventually get rid of our intel_crtc->config pointer */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
                to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

        /* Phase 2: with pipes off, apply global state changes. */
        if (intel_state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

                intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

                /*
                 * SKL workaround: bspec recommends we disable the SAGV when we
                 * have more then one pipe enabled
                 */
                if (!intel_can_enable_sagv(state))
                        intel_disable_sagv(dev_priv);

                intel_modeset_verify_disabled(dev, state);
        }

        /* Complete the events for pipes that have now been disabled */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                bool modeset = needs_modeset(new_crtc_state);

                /* Complete events for now disable pipes here. */
                if (modeset && !new_crtc_state->active && new_crtc_state->event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
                        spin_unlock_irq(&dev->event_lock);

                        new_crtc_state->event = NULL;
                }
        }

        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.update_crtcs(state);

        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
         * - wrap the optimization/post_plane_update stuff into a per-crtc work.
         * - schedule that vblank worker _before_ calling hw_done
         * - at the start of commit_tail, cancel it _synchrously
         * - switch over to the vblank wait helper in the core after that since
         *   we don't need out special handling any more.
         */
        drm_atomic_helper_wait_for_flip_done(dev, state);

        /*
         * Now that the vblank has passed, we can go ahead and program the
         * optimal watermarks on platforms that need two-step watermark
         * programming.
         *
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                intel_cstate = to_intel_crtc_state(new_crtc_state);

                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(intel_state,
                                                              intel_cstate);
        }

        /* Phase 3: post-plane updates, power-domain release, verification. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

                if (put_domains[i])
                        modeset_put_power_domains(dev_priv, put_domains[i]);

                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
        }

        if (intel_state->modeset)
                intel_verify_planes(intel_state);

        /* Re-enable SAGV only after the pipes settle (see w/a above). */
        if (intel_state->modeset && intel_can_enable_sagv(state))
                intel_enable_sagv(dev_priv);

        drm_atomic_helper_commit_hw_done(state);

        if (intel_state->modeset) {
                /* As one of the primary mmio accessors, KMS has a high
                 * likelihood of triggering bugs in unclaimed access. After we
                 * finish modesetting, see if an error has been flagged, and if
                 * so enable debugging for the next modeset - and hope we catch
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
        }

        /*
         * Defer the cleanup of the old state to a separate worker to not
         * impede the current task (userspace for blocking modesets) that
         * are executed inline. For out-of-line asynchronous modesets/flips,
         * deferring to a new worker seems overkill, but we would place a
         * schedule point (cond_resched()) here anyway to keep latencies
         * down.
         */
        INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
        queue_work(system_highpri_wq, &state->commit_work);
}
12832
12833 static void intel_atomic_commit_work(struct work_struct *work)
12834 {
12835         struct drm_atomic_state *state =
12836                 container_of(work, struct drm_atomic_state, commit_work);
12837
12838         intel_atomic_commit_tail(state);
12839 }
12840
12841 static int __i915_sw_fence_call
12842 intel_atomic_commit_ready(struct i915_sw_fence *fence,
12843                           enum i915_sw_fence_notify notify)
12844 {
12845         struct intel_atomic_state *state =
12846                 container_of(fence, struct intel_atomic_state, commit_ready);
12847
12848         switch (notify) {
12849         case FENCE_COMPLETE:
12850                 /* we do blocking waits in the worker, nothing to do here */
12851                 break;
12852         case FENCE_FREE:
12853                 {
12854                         struct intel_atomic_helper *helper =
12855                                 &to_i915(state->base.dev)->atomic_helper;
12856
12857                         if (llist_add(&state->freed, &helper->free_list))
12858                                 schedule_work(&helper->free_work);
12859                         break;
12860                 }
12861         }
12862
12863         return NOTIFY_DONE;
12864 }
12865
12866 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
12867 {
12868         struct drm_plane_state *old_plane_state, *new_plane_state;
12869         struct drm_plane *plane;
12870         int i;
12871
12872         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
12873                 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
12874                                   intel_fb_obj(new_plane_state->fb),
12875                                   to_intel_plane(plane)->frontbuffer_bit);
12876 }
12877
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check(). The new state is swapped in here; the
 * actual hardware programming happens in intel_atomic_commit_tail(),
 * run either inline (blocking commits) or from a worker (nonblocking).
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(dev, state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		/* Commit the fence so any queued waiters are released. */
		i915_sw_fence_commit(&intel_state->commit_ready);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(state, true);

	if (ret) {
		i915_sw_fence_commit(&intel_state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, state);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* State is now swapped in; mirror the new global limits in dev_priv. */
	if (intel_state->modeset) {
		memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
		       sizeof(intel_state->min_cdclk));
		memcpy(dev_priv->min_voltage_level,
		       intel_state->min_voltage_level,
		       sizeof(intel_state->min_voltage_level));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->cdclk.logical = intel_state->cdclk.logical;
		dev_priv->cdclk.actual = intel_state->cdclk.actual;
	}

	/*
	 * Second reference for the commit tail / worker — presumably
	 * released once the tail completes (see intel_atomic_commit_tail()).
	 */
	drm_atomic_state_get(state);
	INIT_WORK(&state->commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&intel_state->commit_ready);
	if (nonblock && intel_state->modeset) {
		/* Nonblocking modesets go through the dedicated modeset wq. */
		queue_work(dev_priv->modeset_wq, &state->commit_work);
	} else if (nonblock) {
		queue_work(system_unbound_wq, &state->commit_work);
	} else {
		/*
		 * Blocking commit: drain pending nonblocking modesets first,
		 * then run the commit tail inline.
		 */
		if (intel_state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
12978
/* CRTC vtable: legacy entry points are routed through the atomic helpers. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
};
12988
/*
 * One-shot vblank-waitqueue entry used to boost the GPU frequency for a
 * request backing a flip (see add_rps_boost_after_vblank()/do_rps_boost()).
 */
struct wait_rps_boost {
	/* Waitqueue hook; func points at do_rps_boost(). */
	struct wait_queue_entry wait;

	/* CRTC we hold a vblank reference on until the callback fires. */
	struct drm_crtc *crtc;
	/* Request to boost; holds a reference, dropped in do_rps_boost(). */
	struct i915_request *request;
};
12995
/*
 * Vblank waitqueue callback: boost RPS for the flip's request unless it
 * has already started executing. One-shot — removes and frees itself.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		gen6_rps_boost(rq, NULL);
	i915_request_put(rq);

	/* Balance the drm_crtc_vblank_get() from add_rps_boost_after_vblank(). */
	drm_crtc_vblank_put(wait->crtc);

	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
13017
13018 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
13019                                        struct dma_fence *fence)
13020 {
13021         struct wait_rps_boost *wait;
13022
13023         if (!dma_fence_is_i915(fence))
13024                 return;
13025
13026         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
13027                 return;
13028
13029         if (drm_crtc_vblank_get(crtc))
13030                 return;
13031
13032         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
13033         if (!wait) {
13034                 drm_crtc_vblank_put(crtc);
13035                 return;
13036         }
13037
13038         wait->request = to_request(dma_fence_get(fence));
13039         wait->crtc = crtc;
13040
13041         wait->wait.func = do_rps_boost;
13042         wait->wait.flags = 0;
13043
13044         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
13045 }
13046
13047 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13048 {
13049         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13050         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13051         struct drm_framebuffer *fb = plane_state->base.fb;
13052         struct i915_vma *vma;
13053
13054         if (plane->id == PLANE_CURSOR &&
13055             INTEL_INFO(dev_priv)->cursor_needs_physical) {
13056                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13057                 const int align = intel_cursor_alignment(dev_priv);
13058                 int err;
13059
13060                 err = i915_gem_object_attach_phys(obj, align);
13061                 if (err)
13062                         return err;
13063         }
13064
13065         vma = intel_pin_and_fence_fb_obj(fb,
13066                                          &plane_state->view,
13067                                          intel_plane_uses_fence(plane_state),
13068                                          &plane_state->flags);
13069         if (IS_ERR(vma))
13070                 return PTR_ERR(vma);
13071
13072         plane_state->vma = vma;
13073
13074         return 0;
13075 }
13076
13077 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13078 {
13079         struct i915_vma *vma;
13080
13081         vma = fetch_and_zero(&old_plane_state->vma);
13082         if (vma)
13083                 intel_unpin_fb_vma(vma, old_plane_state->flags);
13084 }
13085
/*
 * Raise the scheduling priority of work on @obj to display priority, so
 * rendering into a to-be-scanned-out buffer completes sooner.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
13094
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(new_state->state,
						      plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			/* Gate the commit on the old object going idle. */
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	/* Nothing further to do for a plane being disabled (no fb). */
	if (!obj)
		return 0;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret) {
		i915_gem_object_unpin_pages(obj);
		return ret;
	}

	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));

	fb_obj_bump_render_priority(obj);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	/* The pin above holds its own reference on the pages. */
	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	if (!new_state->fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		fence = reservation_object_get_excl_rcu(obj->resv);
		if (fence) {
			add_rps_boost_after_vblank(new_state->crtc, fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!intel_state->rps_interactive) {
		intel_rps_mark_interactive(dev_priv, true);
		intel_state->rps_interactive = true;
	}

	return 0;
}
13214
13215 /**
13216  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13217  * @plane: drm plane to clean up for
13218  * @old_state: the state from the previous modeset
13219  *
13220  * Cleans up a framebuffer that has just been removed from a plane.
13221  *
13222  * Must be called with struct_mutex held.
13223  */
13224 void
13225 intel_cleanup_plane_fb(struct drm_plane *plane,
13226                        struct drm_plane_state *old_state)
13227 {
13228         struct intel_atomic_state *intel_state =
13229                 to_intel_atomic_state(old_state->state);
13230         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13231
13232         if (intel_state->rps_interactive) {
13233                 intel_rps_mark_interactive(dev_priv, false);
13234                 intel_state->rps_interactive = false;
13235         }
13236
13237         /* Should only be called after a successful intel_prepare_plane_fb()! */
13238         mutex_lock(&dev_priv->drm.struct_mutex);
13239         intel_plane_unpin_fb(to_intel_plane_state(old_state));
13240         mutex_unlock(&dev_priv->drm.struct_mutex);
13241 }
13242
13243 int
13244 skl_max_scale(const struct intel_crtc_state *crtc_state,
13245               u32 pixel_format)
13246 {
13247         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13248         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13249         int max_scale, mult;
13250         int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
13251
13252         if (!crtc_state->base.enable)
13253                 return DRM_PLANE_HELPER_NO_SCALING;
13254
13255         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13256         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13257
13258         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
13259                 max_dotclk *= 2;
13260
13261         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
13262                 return DRM_PLANE_HELPER_NO_SCALING;
13263
13264         /*
13265          * skl max scale is lower of:
13266          *    close to 3 but not 3, -1 is for that purpose
13267          *            or
13268          *    cdclk/crtc_clock
13269          */
13270         mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13271         tmpclk1 = (1 << 16) * mult - 1;
13272         tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13273         max_scale = min(tmpclk1, tmpclk2);
13274
13275         return max_scale;
13276 }
13277
/*
 * Per-crtc pre-plane-update hook: program color management for
 * non-modeset updates, then start vblank evasion so the following plane
 * writes land within the same frame. Watermark programming happens in
 * both the modeset and non-modeset paths.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/* Modesets reprogram everything elsewhere; only fastsets need this. */
	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(&intel_cstate->base);
		intel_color_load_luts(&intel_cstate->base);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13315
13316 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13317                                   struct intel_crtc_state *crtc_state)
13318 {
13319         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13320
13321         if (!IS_GEN2(dev_priv))
13322                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13323
13324         if (crtc_state->has_pch_encoder) {
13325                 enum pipe pch_transcoder =
13326                         intel_crtc_pch_transcoder(crtc);
13327
13328                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13329         }
13330 }
13331
/*
 * Per-crtc post-plane-update hook: end vblank evasion, and arm FIFO
 * underrun reporting on the first real update over a BIOS-inherited
 * (fastboot) configuration.
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);

	intel_pipe_update_end(new_crtc_state);

	if (new_crtc_state->update_pipe &&
	    !needs_modeset(&new_crtc_state->base) &&
	    old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
}
13348
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite). Tears down the drm core plane state and frees the enclosing
 * intel_plane allocation.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
13361
13362 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13363                                             u32 format, u64 modifier)
13364 {
13365         switch (modifier) {
13366         case DRM_FORMAT_MOD_LINEAR:
13367         case I915_FORMAT_MOD_X_TILED:
13368                 break;
13369         default:
13370                 return false;
13371         }
13372
13373         switch (format) {
13374         case DRM_FORMAT_C8:
13375         case DRM_FORMAT_RGB565:
13376         case DRM_FORMAT_XRGB1555:
13377         case DRM_FORMAT_XRGB8888:
13378                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13379                         modifier == I915_FORMAT_MOD_X_TILED;
13380         default:
13381                 return false;
13382         }
13383 }
13384
13385 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13386                                             u32 format, u64 modifier)
13387 {
13388         switch (modifier) {
13389         case DRM_FORMAT_MOD_LINEAR:
13390         case I915_FORMAT_MOD_X_TILED:
13391                 break;
13392         default:
13393                 return false;
13394         }
13395
13396         switch (format) {
13397         case DRM_FORMAT_C8:
13398         case DRM_FORMAT_RGB565:
13399         case DRM_FORMAT_XRGB8888:
13400         case DRM_FORMAT_XBGR8888:
13401         case DRM_FORMAT_XRGB2101010:
13402         case DRM_FORMAT_XBGR2101010:
13403                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13404                         modifier == I915_FORMAT_MOD_X_TILED;
13405         default:
13406                 return false;
13407         }
13408 }
13409
/*
 * .format_mod_supported for SKL+ universal planes. The modifier switch
 * filters to the tilings the plane supports (CCS only where has_ccs);
 * the format switch then narrows per-format: the fallthrough cascade
 * means each group accepts its own modifiers plus everything the groups
 * below it accept.
 */
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
					   u32 format, u64 modifier)
{
	struct intel_plane *plane = to_intel_plane(_plane);

	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		break;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* Render compression is a per-plane capability. */
		if (!plane->has_ccs)
			return false;
		break;
	default:
		return false;
	}

	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		/* Only 32bpp RGB formats support CCS. */
		if (is_ccs_modifier(modifier))
			return true;
		/* fall through */
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
		/* These additionally support Yf tiling (C8 below does not). */
		if (modifier == I915_FORMAT_MOD_Yf_TILED)
			return true;
		/* fall through */
	case DRM_FORMAT_C8:
		if (modifier == DRM_FORMAT_MOD_LINEAR ||
		    modifier == I915_FORMAT_MOD_X_TILED ||
		    modifier == I915_FORMAT_MOD_Y_TILED)
			return true;
		/* fall through */
	default:
		return false;
	}
}
13459
13460 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13461                                               u32 format, u64 modifier)
13462 {
13463         return modifier == DRM_FORMAT_MOD_LINEAR &&
13464                 format == DRM_FORMAT_ARGB8888;
13465 }
13466
13467 static struct drm_plane_funcs skl_plane_funcs = {
13468         .update_plane = drm_atomic_helper_update_plane,
13469         .disable_plane = drm_atomic_helper_disable_plane,
13470         .destroy = intel_plane_destroy,
13471         .atomic_get_property = intel_plane_atomic_get_property,
13472         .atomic_set_property = intel_plane_atomic_set_property,
13473         .atomic_duplicate_state = intel_plane_duplicate_state,
13474         .atomic_destroy_state = intel_plane_destroy_state,
13475         .format_mod_supported = skl_plane_format_mod_supported,
13476 };
13477
13478 static struct drm_plane_funcs i965_plane_funcs = {
13479         .update_plane = drm_atomic_helper_update_plane,
13480         .disable_plane = drm_atomic_helper_disable_plane,
13481         .destroy = intel_plane_destroy,
13482         .atomic_get_property = intel_plane_atomic_get_property,
13483         .atomic_set_property = intel_plane_atomic_set_property,
13484         .atomic_duplicate_state = intel_plane_duplicate_state,
13485         .atomic_destroy_state = intel_plane_destroy_state,
13486         .format_mod_supported = i965_plane_format_mod_supported,
13487 };
13488
13489 static struct drm_plane_funcs i8xx_plane_funcs = {
13490         .update_plane = drm_atomic_helper_update_plane,
13491         .disable_plane = drm_atomic_helper_disable_plane,
13492         .destroy = intel_plane_destroy,
13493         .atomic_get_property = intel_plane_atomic_get_property,
13494         .atomic_set_property = intel_plane_atomic_set_property,
13495         .atomic_duplicate_state = intel_plane_duplicate_state,
13496         .atomic_destroy_state = intel_plane_destroy_state,
13497         .format_mod_supported = i8xx_plane_format_mod_supported,
13498 };
13499
/*
 * Fast path for legacy cursor ioctls: update the cursor plane without a
 * full atomic commit (and thus without vblank waits) when only the fb
 * or position changes. Anything that could affect watermarks or needs a
 * modeset falls back to the regular atomic slowpath at the bottom.
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   uint32_t src_x, uint32_t src_y,
			   uint32_t src_w, uint32_t src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct drm_crtc_state *crtc_state = crtc->state;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->active || needs_modeset(crtc_state) ||
	    to_intel_crtc_state(crtc_state)->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
						  to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
						  to_intel_plane_state(plane->state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

	/* Move frontbuffer tracking from the old fb's object to the new. */
	old_fb = old_plane_state->fb;
	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(intel_plane,
					  to_intel_crtc_state(crtc->state),
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
	}

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/* On error the new state was never swapped in; free whichever lost. */
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
13613
/*
 * Plane funcs for cursor planes. Legacy .update_plane goes through
 * intel_legacy_cursor_update(), which can take a fast path for cursor
 * moves and falls back to the atomic helper otherwise.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .update_plane = intel_legacy_cursor_update,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_get_property = intel_plane_atomic_get_property,
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = intel_cursor_format_mod_supported,
};
13624
13625 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13626                                enum i9xx_plane_id i9xx_plane)
13627 {
13628         if (!HAS_FBC(dev_priv))
13629                 return false;
13630
13631         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13632                 return i9xx_plane == PLANE_A; /* tied to pipe A */
13633         else if (IS_IVYBRIDGE(dev_priv))
13634                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13635                         i9xx_plane == PLANE_C;
13636         else if (INTEL_GEN(dev_priv) >= 4)
13637                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13638         else
13639                 return i9xx_plane == PLANE_A;
13640 }
13641
13642 static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
13643                               enum pipe pipe, enum plane_id plane_id)
13644 {
13645         if (!HAS_FBC(dev_priv))
13646                 return false;
13647
13648         return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
13649 }
13650
13651 bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
13652                           enum pipe pipe, enum plane_id plane_id)
13653 {
13654         /*
13655          * FIXME: ICL requires two hardware planes for scanning out NV12
13656          * framebuffers. Do not advertize support until this is implemented.
13657          */
13658         if (INTEL_GEN(dev_priv) >= 11)
13659                 return false;
13660
13661         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
13662                 return false;
13663
13664         if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
13665                 return false;
13666
13667         if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
13668                 return false;
13669
13670         return true;
13671 }
13672
/*
 * Create and register the primary plane for @pipe, selecting the pixel
 * format list, framebuffer modifiers and plane vfuncs appropriate for
 * the hardware generation, then attaching the rotation (gen4+) and
 * color encoding/range (gen9+) properties.
 *
 * Returns the new plane or an ERR_PTR() on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_plane *primary = NULL;
        struct intel_plane_state *state = NULL;
        const struct drm_plane_funcs *plane_funcs;
        const uint32_t *intel_primary_formats;
        unsigned int supported_rotations;
        unsigned int num_formats;
        const uint64_t *modifiers;
        int ret;

        primary = kzalloc(sizeof(*primary), GFP_KERNEL);
        if (!primary) {
                ret = -ENOMEM;
                goto fail;
        }

        state = intel_create_plane_state(&primary->base);
        if (!state) {
                ret = -ENOMEM;
                goto fail;
        }

        primary->base.state = &state->base;

        /* scaler_id == -1 means no scaler assigned to this plane yet */
        if (INTEL_GEN(dev_priv) >= 9)
                state->scaler_id = -1;
        primary->pipe = pipe;
        /*
         * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
         * port is hooked to pipe B. Hence we want plane A feeding pipe B.
         */
        if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
                primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
        else
                primary->i9xx_plane = (enum i9xx_plane_id) pipe;
        primary->id = PLANE_PRIMARY;
        primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);

        if (INTEL_GEN(dev_priv) >= 9)
                primary->has_fbc = skl_plane_has_fbc(dev_priv,
                                                     primary->pipe,
                                                     primary->id);
        else
                primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
                                                      primary->i9xx_plane);

        if (primary->has_fbc) {
                struct intel_fbc *fbc = &dev_priv->fbc;

                fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
        }

        /* Pick formats/modifiers and plane vfuncs per hardware generation. */
        if (INTEL_GEN(dev_priv) >= 9) {
                primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
                                                     PLANE_PRIMARY);

                if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
                        intel_primary_formats = skl_pri_planar_formats;
                        num_formats = ARRAY_SIZE(skl_pri_planar_formats);
                } else {
                        intel_primary_formats = skl_primary_formats;
                        num_formats = ARRAY_SIZE(skl_primary_formats);
                }

                if (primary->has_ccs)
                        modifiers = skl_format_modifiers_ccs;
                else
                        modifiers = skl_format_modifiers_noccs;

                primary->max_stride = skl_plane_max_stride;
                primary->update_plane = skl_update_plane;
                primary->disable_plane = skl_disable_plane;
                primary->get_hw_state = skl_plane_get_hw_state;
                primary->check_plane = skl_plane_check;

                plane_funcs = &skl_plane_funcs;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                intel_primary_formats = i965_primary_formats;
                num_formats = ARRAY_SIZE(i965_primary_formats);
                modifiers = i9xx_format_modifiers;

                primary->max_stride = i9xx_plane_max_stride;
                primary->update_plane = i9xx_update_plane;
                primary->disable_plane = i9xx_disable_plane;
                primary->get_hw_state = i9xx_plane_get_hw_state;
                primary->check_plane = i9xx_plane_check;

                plane_funcs = &i965_plane_funcs;
        } else {
                intel_primary_formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
                modifiers = i9xx_format_modifiers;

                primary->max_stride = i9xx_plane_max_stride;
                primary->update_plane = i9xx_update_plane;
                primary->disable_plane = i9xx_disable_plane;
                primary->get_hw_state = i9xx_plane_get_hw_state;
                primary->check_plane = i9xx_plane_check;

                plane_funcs = &i8xx_plane_funcs;
        }

        /* The user-visible plane name differs per generation. */
        if (INTEL_GEN(dev_priv) >= 9)
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
                                               0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane 1%c", pipe_name(pipe));
        else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
                                               0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "primary %c", pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
                                               0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane %c",
                                               plane_name(primary->i9xx_plane));
        if (ret)
                goto fail;

        /* Advertise the rotations/reflections the hardware can do. */
        if (INTEL_GEN(dev_priv) >= 10) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
                        DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
                        DRM_MODE_REFLECT_X;
        } else if (INTEL_GEN(dev_priv) >= 9) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
                        DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
        } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
                        DRM_MODE_REFLECT_X;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
        } else {
                supported_rotations = DRM_MODE_ROTATE_0;
        }

        if (INTEL_GEN(dev_priv) >= 4)
                drm_plane_create_rotation_property(&primary->base,
                                                   DRM_MODE_ROTATE_0,
                                                   supported_rotations);

        if (INTEL_GEN(dev_priv) >= 9)
                drm_plane_create_color_properties(&primary->base,
                                                  BIT(DRM_COLOR_YCBCR_BT601) |
                                                  BIT(DRM_COLOR_YCBCR_BT709),
                                                  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
                                                  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
                                                  DRM_COLOR_YCBCR_BT709,
                                                  DRM_COLOR_YCBCR_LIMITED_RANGE);

        drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

        return primary;

fail:
        /* On failure neither plane nor state were registered; plain kfree. */
        kfree(state);
        kfree(primary);

        return ERR_PTR(ret);
}
13846
/*
 * Create and register the cursor plane for @pipe.  I845G/I865G use
 * their own cursor vfuncs; everything else takes the i9xx variants.
 *
 * Returns the new plane or an ERR_PTR() on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
                          enum pipe pipe)
{
        struct intel_plane *cursor = NULL;
        struct intel_plane_state *state = NULL;
        int ret;

        cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
        if (!cursor) {
                ret = -ENOMEM;
                goto fail;
        }

        state = intel_create_plane_state(&cursor->base);
        if (!state) {
                ret = -ENOMEM;
                goto fail;
        }

        cursor->base.state = &state->base;

        cursor->pipe = pipe;
        cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
        cursor->id = PLANE_CURSOR;
        cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

        if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
                cursor->max_stride = i845_cursor_max_stride;
                cursor->update_plane = i845_update_cursor;
                cursor->disable_plane = i845_disable_cursor;
                cursor->get_hw_state = i845_cursor_get_hw_state;
                cursor->check_plane = i845_check_cursor;
        } else {
                cursor->max_stride = i9xx_cursor_max_stride;
                cursor->update_plane = i9xx_update_cursor;
                cursor->disable_plane = i9xx_disable_cursor;
                cursor->get_hw_state = i9xx_cursor_get_hw_state;
                cursor->check_plane = i9xx_check_cursor;
        }

        /*
         * NOTE(review): ~0 appears to mark the cached cursor register
         * values as unknown so the first update always writes them —
         * confirm against the i845/i9xx cursor update paths.
         */
        cursor->cursor.base = ~0;
        cursor->cursor.cntl = ~0;

        if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
                cursor->cursor.size = ~0;

        ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
                                       0, &intel_cursor_plane_funcs,
                                       intel_cursor_formats,
                                       ARRAY_SIZE(intel_cursor_formats),
                                       cursor_format_modifiers,
                                       DRM_PLANE_TYPE_CURSOR,
                                       "cursor %c", pipe_name(pipe));
        if (ret)
                goto fail;

        if (INTEL_GEN(dev_priv) >= 4)
                drm_plane_create_rotation_property(&cursor->base,
                                                   DRM_MODE_ROTATE_0,
                                                   DRM_MODE_ROTATE_0 |
                                                   DRM_MODE_ROTATE_180);

        /* scaler_id == -1 means no scaler assigned to this plane yet */
        if (INTEL_GEN(dev_priv) >= 9)
                state->scaler_id = -1;

        drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

        return cursor;

fail:
        /* Nothing was registered with DRM yet; plain kfree suffices. */
        kfree(state);
        kfree(cursor);

        return ERR_PTR(ret);
}
13923
13924 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13925                                     struct intel_crtc_state *crtc_state)
13926 {
13927         struct intel_crtc_scaler_state *scaler_state =
13928                 &crtc_state->scaler_state;
13929         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13930         int i;
13931
13932         crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
13933         if (!crtc->num_scalers)
13934                 return;
13935
13936         for (i = 0; i < crtc->num_scalers; i++) {
13937                 struct intel_scaler *scaler = &scaler_state->scalers[i];
13938
13939                 scaler->in_use = 0;
13940                 scaler->mode = PS_SCALER_MODE_DYN;
13941         }
13942
13943         scaler_state->scaler_id = -1;
13944 }
13945
/*
 * Allocate and initialize the crtc for @pipe along with its primary,
 * sprite and cursor planes, register it with DRM, and wire up the
 * pipe -> crtc and (pre-gen9) plane -> crtc lookup tables.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *crtc_state = NULL;
        struct intel_plane *primary = NULL;
        struct intel_plane *cursor = NULL;
        int sprite, ret;

        intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
        if (!intel_crtc)
                return -ENOMEM;

        crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        if (!crtc_state) {
                ret = -ENOMEM;
                goto fail;
        }
        intel_crtc->config = crtc_state;
        intel_crtc->base.state = &crtc_state->base;
        crtc_state->base.crtc = &intel_crtc->base;

        primary = intel_primary_plane_create(dev_priv, pipe);
        if (IS_ERR(primary)) {
                ret = PTR_ERR(primary);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(primary->id);

        /* Create however many sprite planes this pipe has. */
        for_each_sprite(dev_priv, pipe, sprite) {
                struct intel_plane *plane;

                plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                intel_crtc->plane_ids_mask |= BIT(plane->id);
        }

        cursor = intel_cursor_plane_create(dev_priv, pipe);
        if (IS_ERR(cursor)) {
                ret = PTR_ERR(cursor);
                goto fail;
        }
        intel_crtc->plane_ids_mask |= BIT(cursor->id);

        ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
                                        &primary->base, &cursor->base,
                                        &intel_crtc_funcs,
                                        "pipe %c", pipe_name(pipe));
        if (ret)
                goto fail;

        intel_crtc->pipe = pipe;

        /* initialize shared scalers */
        intel_crtc_init_scalers(intel_crtc, crtc_state);

        /* Each pipe maps to exactly one crtc; catch double registration. */
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
               dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
        dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

        /* Pre-gen9 also tracks which crtc owns each i9xx primary plane. */
        if (INTEL_GEN(dev_priv) < 9) {
                enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

                BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
                       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
                dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
        }

        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

        intel_color_init(&intel_crtc->base);

        /* The drm crtc index is expected to match the pipe enum. */
        WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

        return 0;

fail:
        /*
         * drm_mode_config_cleanup() will free up any
         * crtcs/planes already initialized.
         */
        kfree(crtc_state);
        kfree(intel_crtc);

        return ret;
}
14034
14035 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14036 {
14037         struct drm_device *dev = connector->base.dev;
14038
14039         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14040
14041         if (!connector->base.state->crtc)
14042                 return INVALID_PIPE;
14043
14044         return to_intel_crtc(connector->base.state->crtc)->pipe;
14045 }
14046
14047 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14048                                       struct drm_file *file)
14049 {
14050         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14051         struct drm_crtc *drmmode_crtc;
14052         struct intel_crtc *crtc;
14053
14054         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14055         if (!drmmode_crtc)
14056                 return -ENOENT;
14057
14058         crtc = to_intel_crtc(drmmode_crtc);
14059         pipe_from_crtc_id->pipe = crtc->pipe;
14060
14061         return 0;
14062 }
14063
14064 static int intel_encoder_clones(struct intel_encoder *encoder)
14065 {
14066         struct drm_device *dev = encoder->base.dev;
14067         struct intel_encoder *source_encoder;
14068         int index_mask = 0;
14069         int entry = 0;
14070
14071         for_each_intel_encoder(dev, source_encoder) {
14072                 if (encoders_cloneable(encoder, source_encoder))
14073                         index_mask |= (1 << entry);
14074
14075                 entry++;
14076         }
14077
14078         return index_mask;
14079 }
14080
14081 static bool has_edp_a(struct drm_i915_private *dev_priv)
14082 {
14083         if (!IS_MOBILE(dev_priv))
14084                 return false;
14085
14086         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14087                 return false;
14088
14089         if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14090                 return false;
14091
14092         return true;
14093 }
14094
14095 static bool intel_crt_present(struct drm_i915_private *dev_priv)
14096 {
14097         if (INTEL_GEN(dev_priv) >= 9)
14098                 return false;
14099
14100         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14101                 return false;
14102
14103         if (IS_CHERRYVIEW(dev_priv))
14104                 return false;
14105
14106         if (HAS_PCH_LPT_H(dev_priv) &&
14107             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14108                 return false;
14109
14110         /* DDI E can't be used if DDI A requires 4 lanes */
14111         if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14112                 return false;
14113
14114         if (!dev_priv->vbt.int_crt_support)
14115                 return false;
14116
14117         return true;
14118 }
14119
14120 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14121 {
14122         int pps_num;
14123         int pps_idx;
14124
14125         if (HAS_DDI(dev_priv))
14126                 return;
14127         /*
14128          * This w/a is needed at least on CPT/PPT, but to be sure apply it
14129          * everywhere where registers can be write protected.
14130          */
14131         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14132                 pps_num = 2;
14133         else
14134                 pps_num = 1;
14135
14136         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14137                 u32 val = I915_READ(PP_CONTROL(pps_idx));
14138
14139                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14140                 I915_WRITE(PP_CONTROL(pps_idx), val);
14141         }
14142 }
14143
/*
 * Record the MMIO base of the panel power sequencer for this platform
 * and apply the PPS register unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
        if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
                dev_priv->pps_mmio_base = PCH_PPS_BASE;
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->pps_mmio_base = VLV_PPS_BASE;
        else
                dev_priv->pps_mmio_base = PPS_BASE;

        intel_pps_unlock_regs_wa(dev_priv);
}
14155
/*
 * Probe and register all display outputs (encoders/connectors) for
 * this platform: LVDS/CRT first, then the platform-specific digital
 * ports (DDI, PCH, VLV/CHV, or legacy SDVO/HDMI/DP), DSI, TV and PSR.
 * Finally compute the possible_crtcs/possible_clones masks for every
 * registered encoder.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;

        intel_pps_init(dev_priv);

        if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return;

        /*
         * intel_edp_init_connector() depends on this completing first, to
         * prevent the registeration of both eDP and LVDS and the incorrect
         * sharing of the PPS.
         */
        intel_lvds_init(dev_priv);

        if (intel_crt_present(dev_priv))
                intel_crt_init(dev_priv);

        if (IS_ICELAKE(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
                 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
                 * detect the ports.
                 */
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);

                vlv_dsi_init(dev_priv);
        } else if (HAS_DDI(dev_priv)) {
                int found;

                /*
                 * Haswell uses DDI functions to detect digital outputs.
                 * On SKL pre-D0 the strap isn't connected, so we assume
                 * it's there.
                 */
                found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
                /* WaIgnoreDDIAStrap: skl */
                if (found || IS_GEN9_BC(dev_priv))
                        intel_ddi_init(dev_priv, PORT_A);

                /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
                 * register */
                found = I915_READ(SFUSE_STRAP);

                if (found & SFUSE_STRAP_DDIB_DETECTED)
                        intel_ddi_init(dev_priv, PORT_B);
                if (found & SFUSE_STRAP_DDIC_DETECTED)
                        intel_ddi_init(dev_priv, PORT_C);
                if (found & SFUSE_STRAP_DDID_DETECTED)
                        intel_ddi_init(dev_priv, PORT_D);
                if (found & SFUSE_STRAP_DDIF_DETECTED)
                        intel_ddi_init(dev_priv, PORT_F);
                /*
                 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
                 */
                if (IS_GEN9_BC(dev_priv) &&
                    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
                     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
                     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
                        intel_ddi_init(dev_priv, PORT_E);

        } else if (HAS_PCH_SPLIT(dev_priv)) {
                int found;
                dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

                if (has_edp_a(dev_priv))
                        intel_dp_init(dev_priv, DP_A, PORT_A);

                if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
                        if (!found)
                                intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
                }

                if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

                if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
                        intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                bool has_edp, has_port;

                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
                 * (no way to plug in a DP->HDMI dongle) the DDC pins for
                 * eDP ports may have been muxed to an alternate function.
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
                 *
                 * Sadly the straps seem to be missing sometimes even for HDMI
                 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
                 * and VBT for the presence of the port. Additionally we can't
                 * trust the port type the VBT declares as we've seen at least
                 * HDMI ports that the VBT claim are DP or eDP.
                 */
                has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
                has_port = intel_bios_is_port_present(dev_priv, PORT_B);
                if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
                if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

                has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
                has_port = intel_bios_is_port_present(dev_priv, PORT_C);
                if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
                        has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
                if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

                if (IS_CHERRYVIEW(dev_priv)) {
                        /*
                         * eDP not supported on port D,
                         * so no need to worry about it
                         */
                        has_port = intel_bios_is_port_present(dev_priv, PORT_D);
                        if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
                                intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
                        if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
                                intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
                }

                vlv_dsi_init(dev_priv);
        } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
                bool found = false;

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
                        if (!found && IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
                        }

                        if (!found && IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_B, PORT_B);
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
                }

                if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

                        if (IS_G4X(dev_priv)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
                        }
                        if (IS_G4X(dev_priv))
                                intel_dp_init(dev_priv, DP_C, PORT_C);
                }

                if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
                        intel_dp_init(dev_priv, DP_D, PORT_D);
        } else if (IS_GEN2(dev_priv))
                intel_dvo_init(dev_priv);

        if (SUPPORTS_TV(dev_priv))
                intel_tv_init(dev_priv);

        intel_psr_init(dev_priv);

        /* All encoders are known now; fill in the cloning/crtc masks. */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                encoder->base.possible_crtcs = encoder->crtc_mask;
                encoder->base.possible_clones =
                        intel_encoder_clones(encoder);
        }

        intel_init_pch_refclk(dev_priv);

        drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
14352
/*
 * Final teardown of a userspace-created framebuffer, called via
 * drm_framebuffer_funcs.destroy once the last fb reference is dropped.
 * Order matters: unregister from the DRM core first, then release the
 * GEM object's bookkeeping, and free the wrapper last.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	drm_framebuffer_cleanup(fb);

	/* Balance the framebuffer_references taken in intel_framebuffer_init() */
	i915_gem_object_lock(obj);
	WARN_ON(!obj->framebuffer_references--);
	i915_gem_object_unlock(obj);

	/* Drop the object reference held since intel_user_framebuffer_create() */
	i915_gem_object_put(obj);

	kfree(intel_fb);
}
14368
14369 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14370                                                 struct drm_file *file,
14371                                                 unsigned int *handle)
14372 {
14373         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14374
14375         if (obj->userptr.mm) {
14376                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14377                 return -EINVAL;
14378         }
14379
14380         return drm_gem_handle_create(file, &obj->base, handle);
14381 }
14382
/*
 * DIRTYFB ioctl handler: flush CPU writes to the object backing @fb so
 * the display engine observes the latest contents.  The clip rects and
 * flags are ignored; the whole object is flushed unconditionally.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	/* Notify frontbuffer tracking so PSR/FBC invalidate correctly */
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	return 0;
}
14396
/* Framebuffer vfuncs for userspace-created (ADDFB/ADDFB2) framebuffers */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14402
14403 static
14404 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14405                          uint64_t fb_modifier, uint32_t pixel_format)
14406 {
14407         struct intel_crtc *crtc;
14408         struct intel_plane *plane;
14409
14410         /*
14411          * We assume the primary plane for pipe A has
14412          * the highest stride limits of them all.
14413          */
14414         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14415         plane = to_intel_plane(crtc->base.primary);
14416
14417         return plane->max_stride(plane, pixel_format, fb_modifier,
14418                                  DRM_MODE_ROTATE_0);
14419 }
14420
14421 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14422                                   struct drm_i915_gem_object *obj,
14423                                   struct drm_mode_fb_cmd2 *mode_cmd)
14424 {
14425         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
14426         struct drm_framebuffer *fb = &intel_fb->base;
14427         struct drm_format_name_buf format_name;
14428         u32 pitch_limit;
14429         unsigned int tiling, stride;
14430         int ret = -EINVAL;
14431         int i;
14432
14433         i915_gem_object_lock(obj);
14434         obj->framebuffer_references++;
14435         tiling = i915_gem_object_get_tiling(obj);
14436         stride = i915_gem_object_get_stride(obj);
14437         i915_gem_object_unlock(obj);
14438
14439         if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14440                 /*
14441                  * If there's a fence, enforce that
14442                  * the fb modifier and tiling mode match.
14443                  */
14444                 if (tiling != I915_TILING_NONE &&
14445                     tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14446                         DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
14447                         goto err;
14448                 }
14449         } else {
14450                 if (tiling == I915_TILING_X) {
14451                         mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14452                 } else if (tiling == I915_TILING_Y) {
14453                         DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
14454                         goto err;
14455                 }
14456         }
14457
14458         /* Passed in modifier sanity checking. */
14459         switch (mode_cmd->modifier[0]) {
14460         case I915_FORMAT_MOD_Y_TILED_CCS:
14461         case I915_FORMAT_MOD_Yf_TILED_CCS:
14462                 switch (mode_cmd->pixel_format) {
14463                 case DRM_FORMAT_XBGR8888:
14464                 case DRM_FORMAT_ABGR8888:
14465                 case DRM_FORMAT_XRGB8888:
14466                 case DRM_FORMAT_ARGB8888:
14467                         break;
14468                 default:
14469                         DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
14470                         goto err;
14471                 }
14472                 /* fall through */
14473         case I915_FORMAT_MOD_Y_TILED:
14474         case I915_FORMAT_MOD_Yf_TILED:
14475                 if (INTEL_GEN(dev_priv) < 9) {
14476                         DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14477                                       mode_cmd->modifier[0]);
14478                         goto err;
14479                 }
14480         case DRM_FORMAT_MOD_LINEAR:
14481         case I915_FORMAT_MOD_X_TILED:
14482                 break;
14483         default:
14484                 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
14485                               mode_cmd->modifier[0]);
14486                 goto err;
14487         }
14488
14489         /*
14490          * gen2/3 display engine uses the fence if present,
14491          * so the tiling mode must match the fb modifier exactly.
14492          */
14493         if (INTEL_GEN(dev_priv) < 4 &&
14494             tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14495                 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
14496                 goto err;
14497         }
14498
14499         pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
14500                                            mode_cmd->pixel_format);
14501         if (mode_cmd->pitches[0] > pitch_limit) {
14502                 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14503                               mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
14504                               "tiled" : "linear",
14505                               mode_cmd->pitches[0], pitch_limit);
14506                 goto err;
14507         }
14508
14509         /*
14510          * If there's a fence, enforce that
14511          * the fb pitch and fence stride match.
14512          */
14513         if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
14514                 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
14515                               mode_cmd->pitches[0], stride);
14516                 goto err;
14517         }
14518
14519         /* Reject formats not supported by any plane early. */
14520         switch (mode_cmd->pixel_format) {
14521         case DRM_FORMAT_C8:
14522         case DRM_FORMAT_RGB565:
14523         case DRM_FORMAT_XRGB8888:
14524         case DRM_FORMAT_ARGB8888:
14525                 break;
14526         case DRM_FORMAT_XRGB1555:
14527                 if (INTEL_GEN(dev_priv) > 3) {
14528                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14529                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14530                         goto err;
14531                 }
14532                 break;
14533         case DRM_FORMAT_ABGR8888:
14534                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
14535                     INTEL_GEN(dev_priv) < 9) {
14536                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14537                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14538                         goto err;
14539                 }
14540                 break;
14541         case DRM_FORMAT_XBGR8888:
14542         case DRM_FORMAT_XRGB2101010:
14543         case DRM_FORMAT_XBGR2101010:
14544                 if (INTEL_GEN(dev_priv) < 4) {
14545                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14546                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14547                         goto err;
14548                 }
14549                 break;
14550         case DRM_FORMAT_ABGR2101010:
14551                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
14552                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14553                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14554                         goto err;
14555                 }
14556                 break;
14557         case DRM_FORMAT_YUYV:
14558         case DRM_FORMAT_UYVY:
14559         case DRM_FORMAT_YVYU:
14560         case DRM_FORMAT_VYUY:
14561                 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
14562                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14563                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14564                         goto err;
14565                 }
14566                 break;
14567         case DRM_FORMAT_NV12:
14568                 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
14569                     IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
14570                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14571                                       drm_get_format_name(mode_cmd->pixel_format,
14572                                                           &format_name));
14573                         goto err;
14574                 }
14575                 break;
14576         default:
14577                 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14578                               drm_get_format_name(mode_cmd->pixel_format, &format_name));
14579                 goto err;
14580         }
14581
14582         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14583         if (mode_cmd->offsets[0] != 0)
14584                 goto err;
14585
14586         drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
14587
14588         if (fb->format->format == DRM_FORMAT_NV12 &&
14589             (fb->width < SKL_MIN_YUV_420_SRC_W ||
14590              fb->height < SKL_MIN_YUV_420_SRC_H ||
14591              (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
14592                 DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
14593                 return -EINVAL;
14594         }
14595
14596         for (i = 0; i < fb->format->num_planes; i++) {
14597                 u32 stride_alignment;
14598
14599                 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
14600                         DRM_DEBUG_KMS("bad plane %d handle\n", i);
14601                         goto err;
14602                 }
14603
14604                 stride_alignment = intel_fb_stride_alignment(fb, i);
14605
14606                 /*
14607                  * Display WA #0531: skl,bxt,kbl,glk
14608                  *
14609                  * Render decompression and plane width > 3840
14610                  * combined with horizontal panning requires the
14611                  * plane stride to be a multiple of 4. We'll just
14612                  * require the entire fb to accommodate that to avoid
14613                  * potential runtime errors at plane configuration time.
14614                  */
14615                 if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
14616                     is_ccs_modifier(fb->modifier))
14617                         stride_alignment *= 4;
14618
14619                 if (fb->pitches[i] & (stride_alignment - 1)) {
14620                         DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
14621                                       i, fb->pitches[i], stride_alignment);
14622                         goto err;
14623                 }
14624
14625                 fb->obj[i] = &obj->base;
14626         }
14627
14628         ret = intel_fill_fb_info(dev_priv, fb);
14629         if (ret)
14630                 goto err;
14631
14632         ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
14633         if (ret) {
14634                 DRM_ERROR("framebuffer init failed %d\n", ret);
14635                 goto err;
14636         }
14637
14638         return 0;
14639
14640 err:
14641         i915_gem_object_lock(obj);
14642         obj->framebuffer_references--;
14643         i915_gem_object_unlock(obj);
14644         return ret;
14645 }
14646
14647 static struct drm_framebuffer *
14648 intel_user_framebuffer_create(struct drm_device *dev,
14649                               struct drm_file *filp,
14650                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
14651 {
14652         struct drm_framebuffer *fb;
14653         struct drm_i915_gem_object *obj;
14654         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14655
14656         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14657         if (!obj)
14658                 return ERR_PTR(-ENOENT);
14659
14660         fb = intel_framebuffer_create(obj, &mode_cmd);
14661         if (IS_ERR(fb))
14662                 i915_gem_object_put(obj);
14663
14664         return fb;
14665 }
14666
/*
 * Free an i915 atomic state: release the base DRM state contents,
 * tear down the commit_ready fence embedded in the i915 wrapper,
 * then free the whole allocation.
 */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}
14677
14678 static enum drm_mode_status
14679 intel_mode_valid(struct drm_device *dev,
14680                  const struct drm_display_mode *mode)
14681 {
14682         struct drm_i915_private *dev_priv = to_i915(dev);
14683         int hdisplay_max, htotal_max;
14684         int vdisplay_max, vtotal_max;
14685
14686         /*
14687          * Can't reject DBLSCAN here because Xorg ddxen can add piles
14688          * of DBLSCAN modes to the output's mode list when they detect
14689          * the scaling mode property on the connector. And they don't
14690          * ask the kernel to validate those modes in any way until
14691          * modeset time at which point the client gets a protocol error.
14692          * So in order to not upset those clients we silently ignore the
14693          * DBLSCAN flag on such connectors. For other connectors we will
14694          * reject modes with the DBLSCAN flag in encoder->compute_config().
14695          * And we always reject DBLSCAN modes in connector->mode_valid()
14696          * as we never want such modes on the connector's mode list.
14697          */
14698
14699         if (mode->vscan > 1)
14700                 return MODE_NO_VSCAN;
14701
14702         if (mode->flags & DRM_MODE_FLAG_HSKEW)
14703                 return MODE_H_ILLEGAL;
14704
14705         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14706                            DRM_MODE_FLAG_NCSYNC |
14707                            DRM_MODE_FLAG_PCSYNC))
14708                 return MODE_HSYNC;
14709
14710         if (mode->flags & (DRM_MODE_FLAG_BCAST |
14711                            DRM_MODE_FLAG_PIXMUX |
14712                            DRM_MODE_FLAG_CLKDIV2))
14713                 return MODE_BAD;
14714
14715         if (INTEL_GEN(dev_priv) >= 9 ||
14716             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14717                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
14718                 vdisplay_max = 4096;
14719                 htotal_max = 8192;
14720                 vtotal_max = 8192;
14721         } else if (INTEL_GEN(dev_priv) >= 3) {
14722                 hdisplay_max = 4096;
14723                 vdisplay_max = 4096;
14724                 htotal_max = 8192;
14725                 vtotal_max = 8192;
14726         } else {
14727                 hdisplay_max = 2048;
14728                 vdisplay_max = 2048;
14729                 htotal_max = 4096;
14730                 vtotal_max = 4096;
14731         }
14732
14733         if (mode->hdisplay > hdisplay_max ||
14734             mode->hsync_start > htotal_max ||
14735             mode->hsync_end > htotal_max ||
14736             mode->htotal > htotal_max)
14737                 return MODE_H_ILLEGAL;
14738
14739         if (mode->vdisplay > vdisplay_max ||
14740             mode->vsync_start > vtotal_max ||
14741             mode->vsync_end > vtotal_max ||
14742             mode->vtotal > vtotal_max)
14743                 return MODE_V_ILLEGAL;
14744
14745         return MODE_OK;
14746 }
14747
/* Mode config vfuncs: fb creation, mode validation and the atomic machinery */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14759
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform implementations of the pipe config readout,
 * initial plane config readout, clock computation and crtc
 * enable/disable hooks, plus the FDI link training and crtc update
 * helpers.  The ladder is ordered newest platform first, so the order
 * of the checks matters.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training, only set on the platforms checked below */
	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;
}
14852
14853 /*
14854  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14855  */
14856 static void quirk_ssc_force_disable(struct drm_device *dev)
14857 {
14858         struct drm_i915_private *dev_priv = to_i915(dev);
14859         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14860         DRM_INFO("applying lvds SSC disable quirk\n");
14861 }
14862
14863 /*
14864  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14865  * brightness value
14866  */
14867 static void quirk_invert_brightness(struct drm_device *dev)
14868 {
14869         struct drm_i915_private *dev_priv = to_i915(dev);
14870         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14871         DRM_INFO("applying inverted panel brightness quirk\n");
14872 }
14873
14874 /* Some VBT's incorrectly indicate no backlight is present */
14875 static void quirk_backlight_present(struct drm_device *dev)
14876 {
14877         struct drm_i915_private *dev_priv = to_i915(dev);
14878         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14879         DRM_INFO("applying backlight present quirk\n");
14880 }
14881
14882 /* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
14883  * which is 300 ms greater than eDP spec T12 min.
14884  */
14885 static void quirk_increase_t12_delay(struct drm_device *dev)
14886 {
14887         struct drm_i915_private *dev_priv = to_i915(dev);
14888
14889         dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
14890         DRM_INFO("Applying T12 delay quirk\n");
14891 }
14892
14893 /*
14894  * GeminiLake NUC HDMI outputs require additional off time
14895  * this allows the onboard retimer to correctly sync to signal
14896  */
14897 static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
14898 {
14899         struct drm_i915_private *dev_priv = to_i915(dev);
14900
14901         dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
14902         DRM_INFO("Applying Increase DDI Disabled quirk\n");
14903 }
14904
/*
 * PCI-id keyed quirk entry; matched against the device's PCI ids by
 * intel_init_quirks().  Subsystem ids may be PCI_ANY_ID wildcards.
 */
struct intel_quirk {
	int device;			/* PCI device id */
	int subsystem_vendor;		/* PCI subsystem vendor, or PCI_ANY_ID */
	int subsystem_device;		/* PCI subsystem device, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied when all ids match */
};
14911
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* applied on any DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated match list */
};
14917
/*
 * DMI match callback: just logs which system matched; the quirk hook
 * itself is applied from intel_init_quirks().
 */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
14923
/* DMI-matched quirks, checked by intel_init_quirks() via dmi_check_system() */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
14939
/*
 * PCI-id keyed quirk table, applied by intel_init_quirks().
 * Entries are { device, subsystem_vendor, subsystem_device, hook }.
 */
static struct intel_quirk intel_quirks[] = {
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },

	/* Toshiba Satellite P50-C-18C */
	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },

	/* GeminiLake NUC */
	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	/* ASRock ITX*/
	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
14999
15000 static void intel_init_quirks(struct drm_device *dev)
15001 {
15002         struct pci_dev *d = dev->pdev;
15003         int i;
15004
15005         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15006                 struct intel_quirk *q = &intel_quirks[i];
15007
15008                 if (d->device == q->device &&
15009                     (d->subsystem_vendor == q->subsystem_vendor ||
15010                      q->subsystem_vendor == PCI_ANY_ID) &&
15011                     (d->subsystem_device == q->subsystem_device ||
15012                      q->subsystem_device == PCI_ANY_ID))
15013                         q->hook(dev);
15014         }
15015         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15016                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15017                         intel_dmi_quirks[i].hook(dev);
15018         }
15019 }
15020
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* Read-modify-write SR01, setting bit 5 (screen off) */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	/* Then disable the VGA plane itself via the display register */
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15039
15040 void intel_modeset_init_hw(struct drm_device *dev)
15041 {
15042         struct drm_i915_private *dev_priv = to_i915(dev);
15043
15044         intel_update_cdclk(dev_priv);
15045         intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15046         dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15047 }
15048
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Deadlock with a concurrent locker: back off and retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Copy the computed watermarks into the current crtc state. */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15135
15136 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15137 {
15138         if (IS_GEN5(dev_priv)) {
15139                 u32 fdi_pll_clk =
15140                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15141
15142                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15143         } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
15144                 dev_priv->fdi_pll_freq = 270000;
15145         } else {
15146                 return;
15147         }
15148
15149         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15150 }
15151
/*
 * Commit the state that was read out from hardware, pulling in the planes
 * of every active crtc so that their software state gets fully computed.
 * Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			/* Add the crtc's planes so they are recomputed too. */
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		/* Lock contention: reset the state and retry from the top. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
15199
/*
 * One-time modeset initialization: set up the drm_mode_config limits,
 * quirks, PM, crtcs, shared DPLLs and outputs, read the hardware state
 * back out, reserve any BIOS framebuffer, and do an initial commit.
 * Returns 0 on success or a negative error code.
 */
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	init_llist_head(&dev_priv->atomic_helper.free_list);
	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(dev);

	intel_init_pm(dev_priv);

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* maximum framebuffer dimensions */
	if (IS_GEN2(dev_priv)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev_priv)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Per-platform cursor size limits. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev_priv)) {
		dev->mode_config.cursor_width = 64;
		dev->mode_config.cursor_height = 64;
	} else {
		dev->mode_config.cursor_width = 256;
		dev->mode_config.cursor_height = 256;
	}

	dev->mode_config.fb_base = ggtt->gmadr.start;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		ret = intel_crtc_init(dev_priv, pipe);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(dev_priv);

	intel_update_czclk(dev_priv);
	intel_modeset_init_hw(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev_priv);

	/* Just disable it once at startup */
	i915_disable_vga(dev_priv);
	intel_setup_outputs(dev_priv);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	/* A failed initial commit is logged but deliberately non-fatal. */
	return 0;
}
15345
/*
 * Force-enable @pipe on i830 with a fixed 640x480@60Hz timing; used by the
 * force-pipe quirk handling. Programs the DPLL dividers, pipe timings and
 * pipe enable, then waits until the scanline starts moving.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check the divider set against a 48 MHz reference clock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve only the DVO 2x bit from the current DPLL value. */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Fixed 640x480 timings (values are programmed as size - 1). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15415
/*
 * Disable a pipe that was force-enabled via the i830 quirk: turn off the
 * pipe, wait for the scanline to stop, then shut down its DPLL. All planes
 * and cursors are expected to be off already (WARNed below).
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* Nothing should still be scanning out of this pipe. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	/* Pipe is off; now shut down the DPLL, keeping VGA mode disabled. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15437
15438 static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
15439                                    struct intel_plane *plane)
15440 {
15441         enum pipe pipe;
15442
15443         if (!plane->get_hw_state(plane, &pipe))
15444                 return true;
15445
15446         return pipe == crtc->pipe;
15447 }
15448
15449 static void
15450 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15451 {
15452         struct intel_crtc *crtc;
15453
15454         if (INTEL_GEN(dev_priv) >= 4)
15455                 return;
15456
15457         for_each_intel_crtc(&dev_priv->drm, crtc) {
15458                 struct intel_plane *plane =
15459                         to_intel_plane(crtc->base.primary);
15460
15461                 if (intel_plane_mapping_ok(crtc, plane))
15462                         continue;
15463
15464                 DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
15465                               plane->base.name);
15466                 intel_plane_disable_noatomic(crtc, plane);
15467         }
15468 }
15469
/*
 * Report whether any encoder is currently attached to @crtc. The loop body
 * returns on the first encoder found, so this only tests for existence.
 */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
15480
/*
 * Return the first connector attached to @encoder, or NULL if none.
 * The loop body returns on the first match.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
15491
15492 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15493                               enum pipe pch_transcoder)
15494 {
15495         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15496                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15497 }
15498
/*
 * Bring a crtc read out from hardware into a state our driver can cope
 * with: clear BIOS debug leftovers, reset vblank bookkeeping, disable
 * stray non-primary planes, turn off pipes without encoders, and mark
 * FIFO underrun reporting as disabled for correct bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15565
/*
 * Fix up an encoder whose read-out state is inconsistent: a connector
 * that is active while the encoder has no active pipe gets the encoder
 * manually disabled and the connector clamped to off. The opregion is
 * then notified of the final sanitized state.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
15609
15610 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15611 {
15612         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15613
15614         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15615                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15616                 i915_disable_vga(dev_priv);
15617         }
15618 }
15619
/*
 * Re-disable the VGA plane, but only if the VGA power well is already
 * enabled; does nothing otherwise.
 */
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev_priv);

	/* Balance the power reference taken above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15636
15637 /* FIXME read out full plane state for all planes */
15638 static void readout_plane_state(struct intel_crtc *crtc)
15639 {
15640         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15641         struct intel_crtc_state *crtc_state =
15642                 to_intel_crtc_state(crtc->base.state);
15643         struct intel_plane *plane;
15644
15645         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
15646                 struct intel_plane_state *plane_state =
15647                         to_intel_plane_state(plane->base.state);
15648                 enum pipe pipe;
15649                 bool visible;
15650
15651                 visible = plane->get_hw_state(plane, &pipe);
15652
15653                 intel_set_plane_visible(crtc_state, plane_state, visible);
15654         }
15655 }
15656
15657 static void intel_modeset_readout_hw_state(struct drm_device *dev)
15658 {
15659         struct drm_i915_private *dev_priv = to_i915(dev);
15660         enum pipe pipe;
15661         struct intel_crtc *crtc;
15662         struct intel_encoder *encoder;
15663         struct intel_connector *connector;
15664         struct drm_connector_list_iter conn_iter;
15665         int i;
15666
15667         dev_priv->active_crtcs = 0;
15668
15669         for_each_intel_crtc(dev, crtc) {
15670                 struct intel_crtc_state *crtc_state =
15671                         to_intel_crtc_state(crtc->base.state);
15672
15673                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
15674                 memset(crtc_state, 0, sizeof(*crtc_state));
15675                 crtc_state->base.crtc = &crtc->base;
15676
15677                 crtc_state->base.active = crtc_state->base.enable =
15678                         dev_priv->display.get_pipe_config(crtc, crtc_state);
15679
15680                 crtc->base.enabled = crtc_state->base.enable;
15681                 crtc->active = crtc_state->base.active;
15682
15683                 if (crtc_state->base.active)
15684                         dev_priv->active_crtcs |= 1 << crtc->pipe;
15685
15686                 readout_plane_state(crtc);
15687
15688                 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15689                               crtc->base.base.id, crtc->base.name,
15690                               enableddisabled(crtc_state->base.active));
15691         }
15692
15693         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15694                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15695
15696                 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
15697                                                         &pll->state.hw_state);
15698                 pll->state.crtc_mask = 0;
15699                 for_each_intel_crtc(dev, crtc) {
15700                         struct intel_crtc_state *crtc_state =
15701                                 to_intel_crtc_state(crtc->base.state);
15702
15703                         if (crtc_state->base.active &&
15704                             crtc_state->shared_dpll == pll)
15705                                 pll->state.crtc_mask |= 1 << crtc->pipe;
15706                 }
15707                 pll->active_mask = pll->state.crtc_mask;
15708
15709                 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15710                               pll->info->name, pll->state.crtc_mask, pll->on);
15711         }
15712
15713         for_each_intel_encoder(dev, encoder) {
15714                 pipe = 0;
15715
15716                 if (encoder->get_hw_state(encoder, &pipe)) {
15717                         struct intel_crtc_state *crtc_state;
15718
15719                         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15720                         crtc_state = to_intel_crtc_state(crtc->base.state);
15721
15722                         encoder->base.crtc = &crtc->base;
15723                         encoder->get_config(encoder, crtc_state);
15724                 } else {
15725                         encoder->base.crtc = NULL;
15726                 }
15727
15728                 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15729                               encoder->base.base.id, encoder->base.name,
15730                               enableddisabled(encoder->base.crtc),
15731                               pipe_name(pipe));
15732         }
15733
15734         drm_connector_list_iter_begin(dev, &conn_iter);
15735         for_each_intel_connector_iter(connector, &conn_iter) {
15736                 if (connector->get_hw_state(connector)) {
15737                         connector->base.dpms = DRM_MODE_DPMS_ON;
15738
15739                         encoder = connector->encoder;
15740                         connector->base.encoder = &encoder->base;
15741
15742                         if (encoder->base.crtc &&
15743                             encoder->base.crtc->state->active) {
15744                                 /*
15745                                  * This has to be done during hardware readout
15746                                  * because anything calling .crtc_disable may
15747                                  * rely on the connector_mask being accurate.
15748                                  */
15749                                 encoder->base.crtc->state->connector_mask |=
15750                                         drm_connector_mask(&connector->base);
15751                                 encoder->base.crtc->state->encoder_mask |=
15752                                         drm_encoder_mask(&encoder->base);
15753                         }
15754
15755                 } else {
15756                         connector->base.dpms = DRM_MODE_DPMS_OFF;
15757                         connector->base.encoder = NULL;
15758                 }
15759                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15760                               connector->base.base.id, connector->base.name,
15761                               enableddisabled(connector->base.encoder));
15762         }
15763         drm_connector_list_iter_end(&conn_iter);
15764
15765         for_each_intel_crtc(dev, crtc) {
15766                 struct intel_crtc_state *crtc_state =
15767                         to_intel_crtc_state(crtc->base.state);
15768                 int min_cdclk = 0;
15769
15770                 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15771                 if (crtc_state->base.active) {
15772                         intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
15773                         crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
15774                         crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
15775                         intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
15776                         WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15777
15778                         /*
15779                          * The initial mode needs to be set in order to keep
15780                          * the atomic core happy. It wants a valid mode if the
15781                          * crtc's enabled, so we do the above call.
15782                          *
15783                          * But we don't set all the derived state fully, hence
15784                          * set a flag to indicate that a full recalculation is
15785                          * needed on the next commit.
15786                          */
15787                         crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
15788
15789                         intel_crtc_compute_pixel_rate(crtc_state);
15790
15791                         if (dev_priv->display.modeset_calc_cdclk) {
15792                                 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
15793                                 if (WARN_ON(min_cdclk < 0))
15794                                         min_cdclk = 0;
15795                         }
15796
15797                         drm_calc_timestamping_constants(&crtc->base,
15798                                                         &crtc_state->base.adjusted_mode);
15799                         update_scanline_offset(crtc);
15800                 }
15801
15802                 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
15803                 dev_priv->min_voltage_level[crtc->pipe] =
15804                         crtc_state->min_voltage_level;
15805
15806                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
15807         }
15808 }
15809
15810 static void
15811 get_encoder_power_domains(struct drm_i915_private *dev_priv)
15812 {
15813         struct intel_encoder *encoder;
15814
15815         for_each_intel_encoder(&dev_priv->drm, encoder) {
15816                 u64 get_domains;
15817                 enum intel_display_power_domain domain;
15818                 struct intel_crtc_state *crtc_state;
15819
15820                 if (!encoder->get_power_domains)
15821                         continue;
15822
15823                 /*
15824                  * MST-primary and inactive encoders don't have a crtc state
15825                  * and neither of these require any power domain references.
15826                  */
15827                 if (!encoder->base.crtc)
15828                         continue;
15829
15830                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
15831                 get_domains = encoder->get_power_domains(encoder, crtc_state);
15832                 for_each_power_domain(domain, get_domains)
15833                         intel_display_power_get(dev_priv, domain);
15834         }
15835 }
15836
15837 static void intel_early_display_was(struct drm_i915_private *dev_priv)
15838 {
15839         /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
15840         if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
15841                 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
15842                            DARBF_GATING_DIS);
15843
15844         if (IS_HASWELL(dev_priv)) {
15845                 /*
15846                  * WaRsPkgCStateDisplayPMReq:hsw
15847                  * System hang if this isn't done before disabling all planes!
15848                  */
15849                 I915_WRITE(CHICKEN_PAR1_1,
15850                            I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
15851         }
15852 }
15853
/* Scan out the current hw modeset state,
 * and sanitize it to the current state.
 *
 * The sequence below is order-dependent: workarounds are applied before
 * readout, power domains are grabbed before sanitization pokes registers,
 * and the INIT power domain reference brackets the whole operation.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	/* Keep the hardware powered while we read and poke registers. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Disable any shared DPLL that readout found on but unused. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and where supported, sanitize) watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev);
	}

	/*
	 * Acquire per-crtc power domain references; a non-zero return
	 * (domains to put back) is unexpected here, hence the WARN_ON.
	 */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	intel_fbc_init_pipe_state(dev_priv);
}
15928
15929 void intel_display_resume(struct drm_device *dev)
15930 {
15931         struct drm_i915_private *dev_priv = to_i915(dev);
15932         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
15933         struct drm_modeset_acquire_ctx ctx;
15934         int ret;
15935
15936         dev_priv->modeset_restore_state = NULL;
15937         if (state)
15938                 state->acquire_ctx = &ctx;
15939
15940         drm_modeset_acquire_init(&ctx, 0);
15941
15942         while (1) {
15943                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15944                 if (ret != -EDEADLK)
15945                         break;
15946
15947                 drm_modeset_backoff(&ctx);
15948         }
15949
15950         if (!ret)
15951                 ret = __intel_display_resume(dev, state, &ctx);
15952
15953         intel_enable_ipc(dev_priv);
15954         drm_modeset_drop_locks(&ctx);
15955         drm_modeset_acquire_fini(&ctx);
15956
15957         if (ret)
15958                 DRM_ERROR("Restoring old state failed with %i\n", ret);
15959         if (state)
15960                 drm_atomic_state_put(state);
15961 }
15962
/**
 * intel_connector_register - register connector sub-devices
 * @connector: connector to register
 *
 * Registers the backlight device (if any) for @connector.
 *
 * Returns: 0 on success, or the negative error code returned by
 * intel_backlight_device_register() on failure.
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/*
	 * The old "goto err; err: return ret;" pattern was dead
	 * indirection with only a single registration step; return the
	 * result directly instead.
	 */
	return intel_backlight_device_register(intel_connector);
}
15977
/* Undo intel_connector_register(): drop backlight device and panel state. */
void intel_connector_unregister(struct drm_connector *connector)
{
	intel_backlight_device_unregister(to_intel_connector(connector));
	intel_panel_destroy_backlight(connector);
}
15985
15986 static void intel_hpd_poll_fini(struct drm_device *dev)
15987 {
15988         struct intel_connector *connector;
15989         struct drm_connector_list_iter conn_iter;
15990
15991         /* Kill all the work that may have been queued by hpd. */
15992         drm_connector_list_iter_begin(dev, &conn_iter);
15993         for_each_intel_connector_iter(connector, &conn_iter) {
15994                 if (connector->modeset_retry_work.func)
15995                         cancel_work_sync(&connector->modeset_retry_work);
15996                 if (connector->hdcp_shim) {
15997                         cancel_delayed_work_sync(&connector->hdcp_check_work);
15998                         cancel_work_sync(&connector->hdcp_prop_work);
15999                 }
16000         }
16001         drm_connector_list_iter_end(&conn_iter);
16002 }
16003
/*
 * intel_modeset_cleanup - tear down all display/modeset state
 * @dev: drm device
 *
 * The teardown order below is significant; see the inline comments for
 * the ordering constraints between irq, polling and fbdev teardown.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Drain any modeset work still queued on our private workqueue. */
	flush_workqueue(dev_priv->modeset_wq);

	/* Atomic-commit free work must have run and left nothing behind. */
	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_teardown_gmbus(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);
}
16044
/*
 * intel_connector_attach_encoder - bind an encoder to a connector
 * @connector: connector to attach
 * @encoder: encoder to attach @connector to
 *
 * Caches the encoder pointer on the intel connector and registers the
 * attachment with the drm core.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base, &encoder->base);
}
16051
16052 /*
16053  * set vga decode state - true == enable VGA decode
16054  */
16055 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16056 {
16057         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16058         u16 gmch_ctrl;
16059
16060         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16061                 DRM_ERROR("failed to read control word\n");
16062                 return -EIO;
16063         }
16064
16065         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16066                 return 0;
16067
16068         if (state)
16069                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16070         else
16071                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16072
16073         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16074                 DRM_ERROR("failed to write control word\n");
16075                 return -EIO;
16076         }
16077
16078         return 0;
16079 }
16080
16081 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16082
/* Snapshot of display registers captured at GPU error time. */
struct intel_display_error_state {

	u32 power_well_driver;		/* HSW_PWR_WELL_CTL2 (hsw/bdw only) */

	int num_transcoders;		/* valid entries in transcoder[] */

	/* Per-pipe cursor registers. */
	struct intel_cursor_error_state {
		u32 control;		/* CURCNTR */
		u32 position;		/* CURPOS */
		u32 base;		/* CURBASE */
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* pipe power domain was enabled at capture */
		u32 source;		/* PIPESRC */
		u32 stat;		/* PIPESTAT (GMCH platforms only) */
	} pipe[I915_MAX_PIPES];

	/* Primary-plane registers; which fields are valid depends on gen. */
	struct intel_plane_error_state {
		u32 control;		/* DSPCNTR */
		u32 stride;		/* DSPSTRIDE */
		u32 size;		/* DSPSIZE (gen <= 3) */
		u32 pos;		/* DSPPOS (gen <= 3) */
		u32 addr;		/* DSPADDR (gen <= 7, not hsw) */
		u32 surface;		/* DSPSURF (gen >= 4) */
		u32 tile_offset;	/* DSPTILEOFF (gen >= 4) */
	} plane[I915_MAX_PIPES];

	/* Per-transcoder timing registers; DSI transcoders not included. */
	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
16126
16127 struct intel_display_error_state *
16128 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16129 {
16130         struct intel_display_error_state *error;
16131         int transcoders[] = {
16132                 TRANSCODER_A,
16133                 TRANSCODER_B,
16134                 TRANSCODER_C,
16135                 TRANSCODER_EDP,
16136         };
16137         int i;
16138
16139         if (INTEL_INFO(dev_priv)->num_pipes == 0)
16140                 return NULL;
16141
16142         error = kzalloc(sizeof(*error), GFP_ATOMIC);
16143         if (error == NULL)
16144                 return NULL;
16145
16146         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16147                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16148
16149         for_each_pipe(dev_priv, i) {
16150                 error->pipe[i].power_domain_on =
16151                         __intel_display_power_is_enabled(dev_priv,
16152                                                          POWER_DOMAIN_PIPE(i));
16153                 if (!error->pipe[i].power_domain_on)
16154                         continue;
16155
16156                 error->cursor[i].control = I915_READ(CURCNTR(i));
16157                 error->cursor[i].position = I915_READ(CURPOS(i));
16158                 error->cursor[i].base = I915_READ(CURBASE(i));
16159
16160                 error->plane[i].control = I915_READ(DSPCNTR(i));
16161                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16162                 if (INTEL_GEN(dev_priv) <= 3) {
16163                         error->plane[i].size = I915_READ(DSPSIZE(i));
16164                         error->plane[i].pos = I915_READ(DSPPOS(i));
16165                 }
16166                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16167                         error->plane[i].addr = I915_READ(DSPADDR(i));
16168                 if (INTEL_GEN(dev_priv) >= 4) {
16169                         error->plane[i].surface = I915_READ(DSPSURF(i));
16170                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16171                 }
16172
16173                 error->pipe[i].source = I915_READ(PIPESRC(i));
16174
16175                 if (HAS_GMCH_DISPLAY(dev_priv))
16176                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
16177         }
16178
16179         /* Note: this does not include DSI transcoders. */
16180         error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16181         if (HAS_DDI(dev_priv))
16182                 error->num_transcoders++; /* Account for eDP. */
16183
16184         for (i = 0; i < error->num_transcoders; i++) {
16185                 enum transcoder cpu_transcoder = transcoders[i];
16186
16187                 error->transcoder[i].power_domain_on =
16188                         __intel_display_power_is_enabled(dev_priv,
16189                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16190                 if (!error->transcoder[i].power_domain_on)
16191                         continue;
16192
16193                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16194
16195                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16196                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16197                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16198                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16199                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16200                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16201                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16202         }
16203
16204         return error;
16205 }
16206
/* Shorthand for printing into the captured error-state buffer @e. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16208
16209 void
16210 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16211                                 struct intel_display_error_state *error)
16212 {
16213         struct drm_i915_private *dev_priv = m->i915;
16214         int i;
16215
16216         if (!error)
16217                 return;
16218
16219         err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
16220         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16221                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
16222                            error->power_well_driver);
16223         for_each_pipe(dev_priv, i) {
16224                 err_printf(m, "Pipe [%d]:\n", i);
16225                 err_printf(m, "  Power: %s\n",
16226                            onoff(error->pipe[i].power_domain_on));
16227                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
16228                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
16229
16230                 err_printf(m, "Plane [%d]:\n", i);
16231                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
16232                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
16233                 if (INTEL_GEN(dev_priv) <= 3) {
16234                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
16235                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
16236                 }
16237                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16238                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
16239                 if (INTEL_GEN(dev_priv) >= 4) {
16240                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
16241                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
16242                 }
16243
16244                 err_printf(m, "Cursor [%d]:\n", i);
16245                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
16246                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
16247                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
16248         }
16249
16250         for (i = 0; i < error->num_transcoders; i++) {
16251                 err_printf(m, "CPU transcoder: %s\n",
16252                            transcoder_name(error->transcoder[i].cpu_transcoder));
16253                 err_printf(m, "  Power: %s\n",
16254                            onoff(error->transcoder[i].power_domain_on));
16255                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
16256                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
16257                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
16258                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
16259                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
16260                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
16261                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
16262         }
16263 }
16264
16265 #endif